Dataset schema (column: type and observed range):
hexsha: string (length 40)
size: int64 (2 to 1.05M)
ext: string (9 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 to 193)
max_stars_repo_name: string (length 6 to 109)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: sequence
max_stars_count: int64 (1 to 36.6k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4 to 193)
max_issues_repo_name: string (length 6 to 109)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: sequence
max_issues_count: int64 (1 to 29.8k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4 to 193)
max_forks_repo_name: string (length 6 to 109)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: sequence
max_forks_count: int64 (1 to 11.2k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 2 to 1.05M)
avg_line_length: float64 (1 to 404k)
max_line_length: int64 (1 to 1.03M)
alphanum_fraction: float64 (0 to 1)
f74be40541ed52d3d130d98259c4e9a4a54d3644 | 3,070 | py | Python | office365/sharepoint/ui/applicationpages/clientPeoplePickerQueryParameters.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | office365/sharepoint/ui/applicationpages/clientPeoplePickerQueryParameters.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | office365/sharepoint/ui/applicationpages/clientPeoplePickerQueryParameters.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null |
from office365.runtime.client_value import ClientValue
from office365.sharepoint.principal.principalSource import PrincipalSource
from office365.sharepoint.principal.principalType import PrincipalType
class ClientPeoplePickerQueryParameters(ClientValue):
def __init__(self, queryString, allowEmailAddresses=True, allowMultipleEntities=True, allowOnlyEmailAddresses=False,
allUrlZones=False, enabledClaimProviders=None, forceClaims=False, maximumEntitySuggestions=1,
principalSource=PrincipalSource.All, principalType=PrincipalType.All, urlZone=0,
urlZoneSpecified=False, sharePointGroupID=0):
"""
Specifies the properties of a principal query
        :param int urlZone: Specifies a location in the topology of the farm for the principal query.
:param int sharePointGroupID: specifies a group containing allowed principals to be used in the principal query.
:param str queryString: Specifies the value to be used in the principal query.
:param int principalType: Specifies the type to be used in the principal query.
:param int principalSource: Specifies the source to be used in the principal query.
:param int maximumEntitySuggestions: Specifies the maximum number of principals to be returned by the
principal query.
:param bool forceClaims: Specifies whether the principal query SHOULD be handled by claims providers.
:param bool enabledClaimProviders: Specifies the claims providers to be used in the principal query.
:param bool allUrlZones: Specifies whether the principal query will search all locations in the topology
of the farm.
:param bool allowOnlyEmailAddresses: Specifies whether to allow the picker to resolve only email addresses as
valid entities. This property is only used when AllowEmailAddresses (section 3.2.5.217.1.1.1) is set to True.
Otherwise it is ignored.
:param bool allowMultipleEntities: Specifies whether the principal query allows multiple values.
:param bool allowEmailAddresses: Specifies whether the principal query can return a resolved principal
matching an unverified e-mail address when unable to resolve to a known principal.
"""
super().__init__()
self.QueryString = queryString
self.AllowEmailAddresses = allowEmailAddresses
self.AllowMultipleEntities = allowMultipleEntities
self.AllowOnlyEmailAddresses = allowOnlyEmailAddresses
self.AllUrlZones = allUrlZones
self.EnabledClaimProviders = enabledClaimProviders
self.ForceClaims = forceClaims
self.MaximumEntitySuggestions = maximumEntitySuggestions
self.PrincipalSource = principalSource
self.PrincipalType = principalType
self.UrlZone = urlZone
self.UrlZoneSpecified = urlZoneSpecified
self.SharePointGroupID = sharePointGroupID
@property
def entity_type_name(self):
return "SP.UI.ApplicationPages.ClientPeoplePickerQueryParameters"
| 60.196078 | 120 | 0.749511 |
f74be41a0b82d16d29e9badfcc3f1f244d2e6282 | 320 | py | Python | backend/src/app/schemas/bin.py | niikakralj/EESTEC-Team | be70366dfc249e200ade8deb84408e758123c499 | [
"MIT"
] | null | null | null | backend/src/app/schemas/bin.py | niikakralj/EESTEC-Team | be70366dfc249e200ade8deb84408e758123c499 | [
"MIT"
] | null | null | null | backend/src/app/schemas/bin.py | niikakralj/EESTEC-Team | be70366dfc249e200ade8deb84408e758123c499 | [
"MIT"
] | null | null | null |
import datetime
from pydantic import BaseModel
class BinsOpened(BaseModel):
card_id: str
bin_id: int
bin_type: str
timestamp: datetime.datetime
weight: int
recycle_status_ok: bool = False
class BinsStatus(BaseModel):
bin_id: int
timestamp: datetime.datetime
waste_status: int
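# Illustrative usage (added sketch, not in the original file): pydantic validates the fields
# declared above and recycle_status_ok defaults to False when omitted. Values are hypothetical.
#
# opened = BinsOpened(card_id="abc123", bin_id=7, bin_type="plastic",
#                     timestamp=datetime.datetime(2021, 1, 1, 12, 0), weight=250)
# assert opened.recycle_status_ok is False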
| 14.545455 | 35 | 0.7125 |
f74bee1552bd5c2cfc1aa8d349bc38fa0414f8a1 | 1,866 | py | Python | can-io-firmware/set_servo.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 40 | 2016-10-04T19:59:22.000Z | 2020-12-25T18:11:35.000Z | can-io-firmware/set_servo.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 209 | 2016-09-21T21:54:28.000Z | 2022-01-26T07:42:37.000Z | can-io-firmware/set_servo.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 21 | 2016-11-07T14:40:16.000Z | 2021-11-02T09:53:37.000Z |
#!/usr/bin/env python3
"""
Sends a PWM via UAVCAN
"""
import argparse
import uavcan
import os
DSDL_DIR = os.path.join(os.path.dirname(__file__), "../uavcan_data_types/cvra")
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"port",
help="SocketCAN interface (e.g. can0) or SLCAN serial port (e.g. /dev/ttyACM0)",
)
parser.add_argument("id", help="ID of the board to target", type=int)
parser.add_argument("servo", help="Servo output to be controlled", type=int)
parser.add_argument(
"pos", help="Desired duty cycle on the servo output", type=float
)
parser.add_argument(
"vel",
help="Desired duty cycle rate of change",
nargs="?",
default=0,
type=float,
)
parser.add_argument(
"acc",
help="Desired duty cycle rate of rate of change",
nargs="?",
default=0,
type=float,
)
return parser.parse_args()
def set_servo(node, dst_id, values):
msg = uavcan.thirdparty.cvra.io.ServoPWM(
node_id=dst_id,
servo_pos=values["pos"],
servo_vel=values["vel"],
servo_acc=values["acc"],
)
node.broadcast(msg, priority=uavcan.TRANSFER_PRIORITY_HIGHEST)
def servo_setpoint(servo, pos, vel, acc):
setpoint = {
"pos": [0, 0, 0, 0],
"vel": [0, 0, 0, 0],
"acc": [0, 0, 0, 0],
}
setpoint["pos"][servo] = pos
setpoint["vel"][servo] = vel
setpoint["acc"][servo] = acc
return setpoint
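# Example of the structure built above (comment added for clarity, not in the original script):
# servo_setpoint(1, 0.5, 0.1, 0.0) returns
#   {"pos": [0, 0.5, 0, 0], "vel": [0, 0.1, 0, 0], "acc": [0, 0.0, 0, 0]}
# i.e. only the entry for the selected servo output is set; the other three stay at 0.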
def main():
args = parse_args()
node = uavcan.make_node(args.port, node_id=42)
uavcan.load_dsdl(DSDL_DIR)
set_servo(node, args.id, servo_setpoint(args.servo, args.pos, args.vel, args.acc))
# Spin node for 1 second
node.spin(1)
node.close()
if __name__ == "__main__":
main()
| 23.037037 | 88 | 0.608253 |
f74c06c1b4b99f15be38c16bb8537a174d705ec3 | 7,306 | py | Python | rlstructures/deprecated/batchers/buffers.py | Purple-PI/rlstructures | 9b201b083715bbda2f3534b010c84e11dfc0a1c7 | [
"MIT"
] | 281 | 2021-01-13T14:20:23.000Z | 2022-03-23T08:46:56.000Z | rlstructures/deprecated/batchers/buffers.py | Purple-PI/rlstructures | 9b201b083715bbda2f3534b010c84e11dfc0a1c7 | [
"MIT"
] | 2 | 2021-01-22T23:28:34.000Z | 2021-04-29T22:05:42.000Z | rlstructures/deprecated/batchers/buffers.py | Purple-PI/rlstructures | 9b201b083715bbda2f3534b010c84e11dfc0a1c7 | [
"MIT"
] | 13 | 2021-01-15T14:53:32.000Z | 2022-03-22T11:12:54.000Z |
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.multiprocessing as mp
from rlstructures import TemporalDictTensor, DictTensor
class Buffer:
def get_free_slots(self, k):
raise NotImplementedError
def set_free_slots(self, s):
raise NotImplementedError
def write(self, slots, variables):
raise NotImplementedError
def close(self):
raise NotImplementedError
def get_trajectories(self, trajectories, erase=True):
raise NotImplementedError
class LocalBuffer(Buffer):
"""
Defines a shared buffer to store trajectories / transitions
The buffer is structured as nslots of size s_slots for each possible variable
"""
def __init__(
self,
n_slots=None,
s_slots=None,
specs_agent_state=None,
specs_agent_output=None,
specs_environment=None,
device=torch.device("cpu"),
):
"""
Init a new buffer
Args:
n_slots (int): the number of slots
s_slots (int): the size of each slot (temporal dimension)
specs (dict): The description of the variable to store in the buffer
"""
self._device = device
self.buffers = {}
self.n_slots = n_slots
self.s_slots = s_slots
# Creation of the storage buffers
nspecs_agent_state = {"_" + k: specs_agent_state[k] for k in specs_agent_state}
nspecs_env = {"_" + k: specs_environment[k] for k in specs_environment}
specs = {
**specs_agent_state,
**specs_agent_output,
**specs_environment,
**nspecs_agent_state,
**nspecs_env,
"position_in_slot": {"size": torch.Size([]), "dtype": torch.int64},
}
for n in specs:
size = (n_slots, s_slots) + specs[n]["size"]
print(
"Creating buffer for '"
+ n
+ "' of size "
+ str(size)
+ " and type "
+ str(specs[n]["dtype"])
)
assert not n in self.buffers, "Same key is used by the agent and the env"
self.buffers[n] = (
torch.zeros(size, dtype=specs[n]["dtype"])
.to(self._device)
.share_memory_()
)
self.position_in_slot = (
torch.zeros(n_slots).to(self._device).long().share_memory_()
)
self._free_slots_queue = mp.Queue()
self._free_slots_queue.cancel_join_thread()
for i in range(n_slots):
self._free_slots_queue.put(i, block=True)
self._full_slots_queue = mp.Queue()
self._full_slots_queue.cancel_join_thread()
def device(self):
return self._device
def get_free_slots(self, k):
"""
Returns k available slots. Wait until enough slots are free
"""
assert k > 0
x = [self._free_slots_queue.get() for i in range(k)]
for i in x:
self.position_in_slot[i] = 0
return x
def set_free_slots(self, s):
"""
Tells the buffer that it can reuse the given slots
:param s may be one slot (int) or multiple slots (list of int)
"""
assert not s is None
if isinstance(s, int):
self._free_slots_queue.put(s)
else:
for ss in s:
self._free_slots_queue.put(ss)
# logging.getLogger("buffer").debug("SET FREE " + str(s))
def write(self, slots, variables):
if not variables.device() == self._device:
variables = variables.to(self._device)
slots = torch.tensor(slots).to(self._device)
assert variables.n_elems() == len(slots)
positions = self.position_in_slot[slots]
a = torch.arange(len(slots)).to(self._device)
for n in variables.keys():
# assert variables[n].size()[0] == 1
# print(self.buffers[n][slots].size())
self.buffers[n][slots, positions] = variables[n][a].detach()
self.position_in_slot[slots] += 1
def is_slot_full(self, slot):
"""
Returns True of a slot is full
"""
return self.position_in_slot[slot] == self.s_slots
def get_single(self, slots, position):
assert isinstance(slots, list)
assert isinstance(slots[0], int)
idx = torch.tensor(slots).to(self._device).long()
d = {k: self.buffers[k][idx, position] for k in self.buffers}
return DictTensor(d)
def close(self):
"""
Close the buffer
"""
self._free_slots_queue.close()
self._full_slots_queue.close()
def get_single_slots(self, slots, erase=True):
assert isinstance(slots, list)
assert isinstance(slots[0], int)
idx = torch.tensor(slots).to(self._device).long()
lengths = self.position_in_slot[idx]
ml = lengths.max().item()
v = {k: self.buffers[k][idx, :ml].clone() for k in self.buffers}
if erase:
self.set_free_slots(slots)
return TemporalDictTensor(v, lengths)
def get_multiple_slots(self, trajectories, erase=True):
"""
        Return the concatenation of multiple slots. This function is not well optimized and could be made faster.
"""
assert isinstance(trajectories, list) or isinstance(trajectories, tuple)
assert isinstance(trajectories[0], list)
assert isinstance(trajectories[0][0], int)
# 1: Unify the size of all trajectories....
max_l = 0
for traj in trajectories:
max_l = max(max_l, len(traj))
ntrajectories = []
for traj in trajectories:
while not len(traj) == max_l:
traj.append(None)
ntrajectories.append(traj)
# 2: Copy the content
length = torch.zeros(len(ntrajectories)).to(self._device).long()
tensors = []
for k in range(max_l):
idxs = [traj[k] for traj in ntrajectories]
nidxs = []
for _id in idxs:
if _id is None:
nidxs.append(0)
else:
nidxs.append(_id)
nidxs = torch.tensor(nidxs).to(self._device)
v = {k: self.buffers[k][nidxs] for k in self.buffers}
pis = self.position_in_slot[nidxs]
# Check that slots are full
if k < max_l - 1:
for i in range(len(pis)):
if not ntrajectories[i][k + 1] is None:
assert pis[i] == self.s_slots
for i in range(len(pis)):
if not ntrajectories[i][k] is None:
length[i] = length[i] + pis[i]
tensors.append(v)
ftrajectories = {
k: torch.cat([t[k] for t in tensors], dim=1) for k in self.buffers
}
if erase:
for k in trajectories:
for kk in k:
if not kk is None:
self.set_free_slots(kk)
return TemporalDictTensor(ftrajectories, length).shorten()
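# Minimal usage sketch (added; not part of the original file). It assumes single-process use
# and a toy spec; in rlstructures the same buffer is shared across worker processes. Each spec
# entry maps a variable name to its per-timestep size and dtype, exactly as read in __init__.
#
# specs_env = {"reward": {"size": torch.Size([]), "dtype": torch.float32}}
# buf = LocalBuffer(n_slots=4, s_slots=8,
#                   specs_agent_state={}, specs_agent_output={},
#                   specs_environment=specs_env)
# slots = buf.get_free_slots(2)              # reserve two empty slots
# # ... fill them step by step with buf.write(slots, DictTensor({...}))
# trajs = buf.get_single_slots(slots)        # read back as a TemporalDictTensor and free the slots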
| 33.209091 | 107 | 0.564057 |
f74c2f818b29270ec86cc6a228ac4b98548b96d6 | 15,213 | py | Python | Homework 4/107062240_hw4_train.py | ChristianLin0420/DeepRL | 143a9bfebd264229d9d26fcdc070065225774e04 | [
"MIT"
] | null | null | null | Homework 4/107062240_hw4_train.py | ChristianLin0420/DeepRL | 143a9bfebd264229d9d26fcdc070065225774e04 | [
"MIT"
] | null | null | null | Homework 4/107062240_hw4_train.py | ChristianLin0420/DeepRL | 143a9bfebd264229d9d26fcdc070065225774e04 | [
"MIT"
] | null | null | null |
from osim.env import L2M2019Env
'''
TD3
'''
import copy
import numpy as np
import pandas as pd
import argparse
import os
import os.path
from os import path
import utils
from collections import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
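# Reading guide (comments added for clarity): the classes below implement the three TD3
# ingredients from the paper cited above:
#   1. clipped double-Q learning   - Critic keeps two Q heads and train() bootstraps from
#      torch.min(target_Q1, target_Q2);
#   2. target policy smoothing     - clipped Gaussian noise is added to the target action
#      before the target Q value is computed;
#   3. delayed policy updates      - the actor and both target networks are refreshed only
#      every policy_freq critic updates.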
def flatten_list(lis):
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
def flatten(mydict):
new_dict = {}
for key, value in mydict.items():
if type(value) == list:
print("key: {}, value: {}".format(key, value))
new_dict[key] = flatten_list(value)
else:
new_dict[key] = value
return new_dict
def FF(ss):
state = flatten(ss)
# print(state)
new_state = []
for v in state.values():
# print(type(v))
if type(v) == dict:
temp = pd.json_normalize(v, sep = '_')
temp = list(temp.values)
for item in temp:
if type(item) == np.ndarray:
for val in item:
if type(val) == list:
for t in val:
new_state.append(float(t))
else:
new_state.append(float(val))
else:
new_state.append(item)
else:
# print("asdf")
for arr in v:
for a in arr:
for val in a:
new_state.append(float(val))
return new_state
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class TD3(object):
def __init__(
self,
state_dim,
action_dim,
max_action,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2
):
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
self.max_action = max_action
self.discount = discount
self.tau = tau
self.policy_noise = policy_noise
self.noise_clip = noise_clip
self.policy_freq = policy_freq
self.total_it = 0
def select_action(self, state):
new_state = FF(state)
new_state = np.array(new_state)
# print(new_state)
new_state = torch.FloatTensor(new_state.reshape(1, -1)).to(device)
return self.actor(new_state).cpu().data.numpy().flatten()
def train(self, replay_buffer, batch_size=256):
self.total_it += 1
# Sample replay buffer
state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.total_it % self.policy_freq == 0:
            # Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
torch.save(self.actor.state_dict(), filename + "_actor")
torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
def load(self, filename):
self.critic.load_state_dict(torch.load(filename + "_critic"))
self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
self.critic_target = copy.deepcopy(self.critic)
self.actor.load_state_dict(torch.load(filename + "_actor"))
self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
self.actor_target = copy.deepcopy(self.actor)
def check_existed_files(self, filename):
print ("File exists:"+str(path.exists(filename + "_critic")))
print ("File exists:" + str(path.exists(filename + "_critic_optimizer")))
print ("File exists:"+str(path.exists(filename + "_actor")))
print ("File exists:" + str(path.exists(filename + "_actor_optimizer")))
'''
REPLAYBUFFER
'''
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def add(self, state, action, next_state, reward, done):
new_state = FF(state)
new_next_state = FF(next_state)
self.state[self.ptr] = new_state
self.action[self.ptr] = action
self.next_state[self.ptr] = new_next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
torch.FloatTensor(self.state[ind]).to(self.device),
torch.FloatTensor(self.action[ind]).to(self.device),
torch.FloatTensor(self.next_state[ind]).to(self.device),
torch.FloatTensor(self.reward[ind]).to(self.device),
torch.FloatTensor(self.not_done[ind]).to(self.device)
)
'''
MAIN
'''
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=5):
eval_env = L2M2019Env(visualize=True)
eval_env.seed(seed + 100)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
action = policy.select_action(state)
for i in range(len(action)):
if action[i] > 1:
action[i] = 1
elif action[i] < 0:
action[i] = 0
# print(action)
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--policy", default="TD3") # Policy name (TD3, DDPG or OurDDPG)
parser.add_argument("--env", default="HalfCheetah-v2") # OpenAI gym environment name
parser.add_argument("--seed", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds
parser.add_argument("--start_timesteps", default=10e2, type=int)# Time steps initial random policy is used
parser.add_argument("--eval_freq", default=1e4, type=int) # How often (time steps) we evaluate
parser.add_argument("--max_timesteps", default=5e7, type=int) # Max time steps to run environment
parser.add_argument("--expl_noise", default=0.1) # Std of Gaussian exploration noise
parser.add_argument("--batch_size", default=256, type=int) # Batch size for both actor and critic
parser.add_argument("--discount", default=0.99) # Discount factor
parser.add_argument("--tau", default=0.005) # Target network update rate
parser.add_argument("--policy_noise", default=0.2) # Noise added to target policy during critic update
parser.add_argument("--noise_clip", default=0.5) # Range to clip target policy noise
parser.add_argument("--policy_freq", default=2, type=int) # Frequency of delayed policy updates
parser.add_argument("--save_model", action="store_true") # Save model and optimizer parameters
parser.add_argument("--load_model", default="true") # Model load file name, "" doesn't load, "default" uses file_name
args = parser.parse_args()
file_name = f"{args.policy}_{args.env}_{args.seed}"
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
print("---------------------------------------")
if not os.path.exists("./results"):
os.makedirs("./results")
if args.save_model and not os.path.exists("./models"):
os.makedirs("./models")
env = L2M2019Env(visualize=True)
# Set seeds
env.seed(args.seed)
# env.action_space.seed(seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": args.discount,
"tau": args.tau,
}
# Initialize policy
if args.policy == "TD3":
# Target policy smoothing is scaled wrt the action scale
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
policy = TD3(**kwargs)
if args.load_model != "":
policy.check_existed_files("models/107062240_HW4_data")
policy_file = file_name if args.load_model == "default" else args.load_model
policy.load("models/107062240_HW4_data")
replay_buffer = ReplayBuffer(state_dim, action_dim)
# Evaluate untrained policy
evaluations = [eval_policy(policy, args.env, args.seed)]
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num = 0
for t in range(int(args.max_timesteps)):
episode_timesteps += 1
# Select action randomly or according to policy
if t < args.start_timesteps:
action = env.action_space.sample()
else:
action = (
policy.select_action(state)
+ np.random.normal(0, max_action * args.expl_noise, size=action_dim)
).clip(-max_action, max_action)
for i in range(len(action)):
if action[i] > 1:
action[i] = 1
elif action[i] < 0:
action[i] = 0
# print(action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_timesteps < 500 else 0
# Store data in replay buffer
replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
episode_reward += reward
# Train agent after collecting sufficient data
if t >= args.start_timesteps:
# print("Start training ........ ")
policy.train(replay_buffer, args.batch_size)
if done:
# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
# Reset environment
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1
# Evaluate episode
if (t + 1) % args.eval_freq == 0:
print("save new model!")
evaluations.append(eval_policy(policy, args.env, args.seed))
np.save(f"./results/{file_name}", evaluations)
policy.save("models/107062240_HW4_data")
| 34.186517 | 137 | 0.593243 |
f74c2fb39d5fcf882799632558ba265e2f829ff7 | 6,071 | py | Python | effector_database/views.py | LuukHenk/effector_database | d9702f89ffd2a23b4f5233d4ece69c1ceebd6164 | [
"MIT"
] | null | null | null | effector_database/views.py | LuukHenk/effector_database | d9702f89ffd2a23b4f5233d4ece69c1ceebd6164 | [
"MIT"
] | null | null | null | effector_database/views.py | LuukHenk/effector_database | d9702f89ffd2a23b4f5233d4ece69c1ceebd6164 | [
"MIT"
] | null | null | null |
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Sequence
from .forms import SequenceForm
def index(request):
""" Redirect to the search page, since this is our homepage """
return HttpResponseRedirect(reverse("effector_database:search"))
def search(request):
"""
Show the ID, name, and signal peptide of all the items in the Sequence model.
Let the user filter items using a GET form.
Also shows an option to view a single sequence object (see view function)
"""
context = {
"effector_id": request.GET.get("effector_id", ""),
"effector_name": request.GET.get("effector_name", ""),
"effector_signal_peptide": request.GET.get("effector_signal_peptide", "")
}
context["query_set"] = Sequence.objects.filter(
effector_id__icontains=context["effector_id"],
effector_name__icontains=context["effector_name"],
effector_signal_peptide__icontains=context["effector_signal_peptide"]
)
return render(request, "effector_database/search.html", context)
def submit(request):
"""
Shows a submission form for adding new sequence to the Sequence model.
If submitted, the form will be directed to the submitted page.
"""
form = SequenceForm(label_suffix='')
return render(request, "effector_database/submit.html", {"form": form})
def submitted(request):
"""
Processing a sequence submission for the Sequence model.
Shows by Djano generated error messages on the same page if the form is incorrect.
Redirects to the search page with a success message if the form is correct.
"""
if request.method == "POST":
form = SequenceForm(request.POST, label_suffix='')
if form.is_valid():
form.save()
message = "Succesfully added '{}'".format(form.cleaned_data['effector_id'])
messages.success(request, message)
return HttpResponseRedirect(reverse("effector_database:search"))
else: # No post request
form = SequenceForm(label_suffix='')
return render(request, "effector_database/submit.html", {"form": form})
def view(request, effector_id):
"""
Show information about a single sequence from the Sequence model using it's primary key
'effector_id'. If this fails, return to the index page (search page).
"""
# Try to show the effector id
try:
context = {"sequence_obj": get_object_or_404(Sequence, pk=effector_id)}
return render(request, "effector_database/view.html", context)
# If an error occurs, return to the search page
except ObjectDoesNotExist:
error_message = "ERROR - Sequence with effector ID '{}' does not exists".format(effector_id)
messages.error(request, error_message)
return HttpResponseRedirect(reverse("effector_database:search"))
def delete(request, effector_id):
"""
Try to delete selected sequence from the Sequence model using it's primary key 'effector_id'.
If this fails, return to the index page (search page).
"""
# Try to delete the object
try:
Sequence.objects.get(pk=effector_id).delete()
message = "Succesfully deleted sequence with effector ID '{}'".format(effector_id)
messages.success(request, message)
# If an error occurs, return to the search page
except ObjectDoesNotExist:
error_message = "ERROR - Sequence with effector ID '{}' does not exists".format(effector_id)
messages.error(request, error_message)
return HttpResponseRedirect(reverse("effector_database:search"))
def update(request, effector_id):
"""
Shows update form for a single sequence from the Sequence model using it's primary key
'effector_id'. If the form is submitted, it will be directed to the 'updated' function.
If not, it will return to the index page (search page).
"""
# Create and render the effector id form.
# the effector_id is used when directed to another page
try:
context = {
"form": SequenceForm(instance=get_object_or_404(Sequence, pk=effector_id)),
"effector_id": effector_id,
}
return render(request, "effector_database/update.html", context)
# If an error occurs, return to the search page
except ObjectDoesNotExist:
error_message = "ERROR - Sequence with effector ID '{}' does not exists".format(effector_id)
messages.error(request, error_message)
return HttpResponseRedirect(reverse("effector_database:search"))
def updated(request, effector_id):
"""
Process content update of a single sequence from the Sequence model using it's primary key
'effector_id'. Unable to update the effector_id, since this breaks current url.
"""
# Generate standard context
context = {
"effector_id": effector_id,
"sequence_obj": get_object_or_404(Sequence, pk=effector_id),
}
context["form"] = SequenceForm(instance=context["sequence_obj"], label_suffix='')
# Check if new data has been submitted
if request.method == "POST":
form = SequenceForm(request.POST, instance=context["sequence_obj"], label_suffix='')
if form.has_changed():
# Don't let user change the effector ID
if "effector_id" in form.changed_data:
messages.error(request, "Only admin change the effector ID")
elif form.is_valid():
form.save()
message = "Succesfully edited effector ID '{}'".format(effector_id)
messages.success(request, message)
return render(request, "effector_database/view.html", context)
else: # No changes made
return render(request, "effector_database/view.html", context)
# Render standard context if nothing had been changed
return render(request, "effector_database/update.html", context)
| 40.205298 | 100 | 0.689837 |
f74c3b3c091e218276bfe1b1c8c61621dbd615e2 | 658 | py | Python | generate_dmrpp.py | amarouane-ABDELHAK/dmrpp-file-generator-docker | 85210b80d4204c5ca5325fe7167c7e1cd8ea62e8 | [
"Apache-2.0"
] | null | null | null | generate_dmrpp.py | amarouane-ABDELHAK/dmrpp-file-generator-docker | 85210b80d4204c5ca5325fe7167c7e1cd8ea62e8 | [
"Apache-2.0"
] | null | null | null | generate_dmrpp.py | amarouane-ABDELHAK/dmrpp-file-generator-docker | 85210b80d4204c5ca5325fe7167c7e1cd8ea62e8 | [
"Apache-2.0"
] | 1 | 2021-03-24T17:23:12.000Z | 2021-03-24T17:23:12.000Z |
from os import listdir
from os.path import isfile, join, basename
from dmrpp_generator.main import DMRPPGenerator
from re import match
if __name__ == "__main__":
workstation_path = "/workstation/"
join_path = lambda x: join(workstation_path, x)
input_files = [join_path(f) for f in listdir(workstation_path) if isfile(join_path(f))]
dmrpp = DMRPPGenerator(input=input_files)
dmrpp.path = workstation_path
[dmrpp.dmrpp_generate(input_file, local=True) for input_file in input_files if match(f"{dmrpp.processing_regex}$",
basename(input_file))]
| 43.866667 | 118 | 0.659574 |
f74c507307dbf6c175e0651753194a29d1feb1f8 | 38,578 | py | Python | PhenNorm/src/Similar_finding_v2.py | DBMI/iSeeDELVE | 72b51b275bd8ae2e1587f4b6709f24307dd6560f | [
"BSD-4-Clause-UC"
] | null | null | null | PhenNorm/src/Similar_finding_v2.py | DBMI/iSeeDELVE | 72b51b275bd8ae2e1587f4b6709f24307dd6560f | [
"BSD-4-Clause-UC"
] | null | null | null | PhenNorm/src/Similar_finding_v2.py | DBMI/iSeeDELVE | 72b51b275bd8ae2e1587f4b6709f24307dd6560f | [
"BSD-4-Clause-UC"
] | null | null | null |
# This script clusters similar variables
# Written by Son Doan, January 2014
# Version 2.0 March 2014
# RUN:
# Example
# python Similar_finding_v2.py -d 1 -o "LabTest" -i ../data/200test/500LabTests_random_KWL.txt2_cat
"""
RULE SETS:
1. Medical History
Type=Medical History
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2), where topic_CUIs in {dsyn, neop, sosy, acab, anab, biof, cgab, inpo, orgf, patf, phsf, mobd}
2. Demographics
Type=Demographics
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2)
3. Lab Test
Type= Lab Test
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2), where topic_CUIs in {lbpr}
Remove every remaining topics
4. Medication
Type= Medication
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
(Topic_CUI (Var1) = Topi_CUI (Var2), where topic CUI in {phsu}
OR
Keyword (Var1)=Keyword (Var2) = "medication")
5. Smoking History
Type= Smoking History
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Keyword (Var1) & Keyword (Var2) are in {smoke, smoking, smoker, tobacco, cigarette, pipe, cigar, nicotine}
6. Healthcare Activity Finding
Prerequisite
Type= Healthcare Activity Finding
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then compare keyword if has keyword in {medical care}
Keyword 1=Keyword 2
Then compare topic CUI in {hlca}
Var 1 CUI=Var2 CUI
7. Diagnostic Procedure
Prerequisite
Type= Diagnostic Procedure
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then compare keyword if has keyword in {ECG, electrocardiogram, t-wave, wave feature, QRS, RR interval, R wave, P wave, Q duration, S wave}
Keyword 1=Keyword 2
Then compare topic CUI in {diap}
Var 1 CUI=Var2 CUI
8. Therapeutic or Preventative Procedure
Prerequisite
Type= Therapeutic or Preventative Procedure
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then compare topic CUI in {topp}
Var 1 CUI=Var2 CUI
"""
import os,sys
import getopt
import re
# ==================================
# Global variables
# ==================================
Group = {}
Group['Medical History']=[]
Group['Demographics']=[]
Group['Lab Tests']=[]
Group['Medication']=[]
Group['Smoking History']=[]
Group['Mental or Emotional Finding']=[]
Group['Drinking History']=[]
Group['Diagnostic Procedure']=[]
Group['Research Attributes']=[]
Group['Clinical Attributes']=[]
Group['Eating or Nutritional Finding']=[]
Group['Healthcare Activity Finding']=[]
Group['Daily or Recreational Activity']=[]
Group['Self-care Status']=[]
Group['Therapeutic or Preventive Procedure']=[]
Group['Substance Use History']=[]
Group['Healthcare Encounter']=[]
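# Expected input record layout (comments added for clarity; inferred from the field indices
# used in readinput2 below). Each input line is tab-separated:
#   field 0                  : variable ID
#   field DescIdx            : original phenotype description (PhenDesc)
#   field DescIdx + 1        : normalized description (PhenText)
#   field DescIdx + 2        : Theme
#   field DescIdx + 3/4/5    : Theme PCN / CUI / semantic type
#   field DescIdx + 6/7/8    : Topic PCN / CUI(s) / semantic type(s), ';'-separated
#   field DescIdx + 9/10/11  : SOI PCN / CUI / semantic type
#   last field               : ';'-separated category list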
def readinput2(input_file, DescIdx, Select):
Phen1 = {}
PhenGroup = {}
IDList = []
CheckDup = {}
#Select = 'All' # DEFAULT
#Default DescIdx = 0, indicating the original PhenDesc is in field #0
# DescIdx = 0
fin = open(input_file,'r')
for items in fin.readlines():
#item = items.split(':::')
item = items.split('\t')
# Add ID for each variable
ID = item[0].strip()
#print ID
if not ID in IDList:
IDList.append(ID)
CheckDup[ID]=0
else:
CheckDup[ID]=1
# Original PhenDesc
PhenDesc = item[DescIdx].strip()
# Normalized PhenDesc
PhenText = item[DescIdx+1].strip()
Theme = item[DescIdx + 2].strip()
ThemePCN = item[DescIdx + 3]
ThemeCUI = item[DescIdx + 4]
ThemeSem = item[DescIdx + 5]
TopicPCN = item[DescIdx + 6]
TopicCUI = item[DescIdx + 7]
TopicSem = item[DescIdx + 8]
#print PhenDesc
#print TopicCUI
#print TopicSem
TopicCUI1 = TopicCUI.split(';')
TopicSem1 = TopicSem.split(';')
TopicCUI2 = []
TopicSem2 = []
for item1 in TopicSem1:
if item1.find('lbpr')>=0:
TopicSem2.append(item1)
TopicCUI2.append(TopicCUI1[TopicSem1.index(item1)].strip())
SOIPCN = item[DescIdx + 9]
SOICUI = item[DescIdx + 10]
SOISem = item[DescIdx + 11]
#print SOICUI
#print SOISem
Category = item[-1].strip().split(';')
#print item[0]
#print Category
TopicCUIL = sorted(TopicCUI2)
SOI_Topic = SOICUI + ':' +'-'.join(TopicCUIL[0:])
if len(Category[0])>0:
for iCat in Category:
#print iCat
iCat = iCat.replace('Family','').strip()
iCat = iCat.replace('Patient','').strip()
if not Group.has_key(iCat):
Group[iCat] = [item]
else:
if not item in Group[iCat] and CheckDup[ID]==0:
Group[iCat].append(item)
fin.close()
#### GLOBAL VARIABLES ####
##################################
##### RULE FOR Medical History ###
##################################
"""
1. Medical History
Type=Medical History
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850)
AND
Topic_CUI (Var1) = Topic_CUI (Var2), where topic_CUIs in {dsyn, neop, sosy, acab, anab, biof, cgab, inpo, orgf, patf, phsf, mobd, fndg}
"""
if Select == "MedHist" or Select=="All":
print "MEDICAL HISTORY CLUSTERS"
Group1 = Group['Medical History']
List1 = ['dsyn', 'neop', 'sosy', 'acab', 'anab', 'biof', 'cgab', 'inpo', 'orgf', 'patf', 'phsf', 'mobd', 'fndg']
cluster1(DescIdx,Group1,List1)
##################################
##### RULE FOR DEMOGRAPHICS ######
##################################
"""
Demographics
Type=Demographics
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2)
"""
if Select == "Demo" or Select=="All":
print "DEMOGRAPHICS CLUSTERS"
Group1 = Group['Demographics']
cluster(DescIdx,Group1)
##################################
##### RULE FOR LAB TESTS ######
##################################
"""
3. Lab Test
Type= Lab Test
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2), where topic_CUIs in {lbpr}
Remove every remaining topics
"""
if Select == "LabTest" or Select=="All":
print "LAB TESTS CLUSTERS"
Group1 = Group['Lab Tests']
List1 = ['lbpr']
cluster1(DescIdx,Group1,List1)
##################################
##### RULE FOR MEDICATIONS ######
##################################
"""
4. Medication
Type= Medication
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
(Topic_CUI (Var1) = Topi_CUI (Var2), where topic CUI in {phsu}
OR
Keyword (Var1)=Keyword (Var2) = "medication")
"""
if Select == "Med" or Select=="All":
print "MEDICATION CLUSTERS"
Group1 = Group['Medication']
List1 = ['phsu']
cluster1(DescIdx,Group1,List1)
##################################
##### RULE FOR Smoking History ###
##################################
"""
Prerequisite
Type= Smoking History
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then
Compare keyword if has keyword in {start*, onset, first, 1st}
These variables are in the same cluster "Start"
OR
Compare keyword if has keyword in {stop*, last, quit*, end*, recency}
These variables are in the same cluster "Last use"
OR
Compare keyword if has keyword in {amount, AMT, pack, /day, per day, per week, /week, /wk, per month, /month, how many, how much, pack-year, chain smoke, # of, number of, NO of, regularly, daily, regular, how often, days, times, TMS, frequency, duration, how long, years, # years, once a,}
These variables are in the same cluster "Amount"
OR
Compare keyword if has keyword in {W/D, withdrawal}
These variables are in the same cluster "Withdrawal"
"""
if Select == "Smoking" or Select=="All":
print "SMOKING CLUSTERS"
Group1 = Group['Smoking History']
#print Group1
Keywords = ['start', 'onset', 'first', '1st']
cluster_synonyms(DescIdx,Group1,Keywords)
Keywords1 = ['stop', 'last', 'quit', 'end', 'recency']
cluster_synonyms(DescIdx,Group1,Keywords1)
Keywords2 = ['amount', 'amt', 'pack', '/day', 'per day', 'days', 'per week', '/week', '/wk', 'per month', '/month', 'how many', 'how much', 'pack-year', 'pack year', 'chain smoke', '# of', 'number of', 'no of', 'regularly', 'daily', 'regular', 'how often', 'days', 'times', 'tms', 'frequency', 'duration', 'how long', 'year', 'years', '# years', 'once a']
cluster_synonyms(DescIdx,Group1,Keywords2)
Keywords3 = ['w/d', 'withdrawal']
cluster_synonyms(DescIdx,Group1,Keywords3)
###############################################
###### RULE FOR Mental or Emotional Finding ###
##############################################
"""
6. Mental or Emotional Finding
Type= Mental or Emotional Finding
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2), where topic_CUIs in {menp}
"""
if Select == "MentalHealth" or Select=="All":
print "Mental or Emotional Finding CLUSTERS"
Group1 = Group['Mental or Emotional Finding']
List1 = ['menp']
ExCUIs = ['C1527305', 'C0013987', 'C0039869', 'C0596545', 'C0237607', 'C2911692']
cluster1_exclude_CUIs(DescIdx,Group1,List1,ExCUIs)
##############################################
##### RULE FOR Heathcare Activity Finding ####
##############################################
"""
Prerequisite
Type= Healthcare Activity Finding
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then compare keyword if has keyword in {'medical care', 'hospital', 'appointment', 'follow up','f/u', 'follow-up','visit', 'encounter', 'service'}
Keyword 1=Keyword 2
Then compare topic CUI in {hlca}
Var 1 CUI=Var2 CUI
"""
if Select == "Activ" or Select=="All":
print "HEALTHCARE ACTIVITIES FINDING CLUSTERS"
Group1 = Group['Healthcare Activity Finding']
Keywords = ['medical care','hospital', 'appointment', 'follow up','f/u', 'follow-up','visit', 'encounter', 'service']
cluster_keywords(DescIdx,Group1,Keywords)
List1 = ['hlca']
cluster1(DescIdx,Group1,List1)
#############################################
##### RULE FOR Diagnostic Procedure #######
#############################################
"""
Prerequisite
Type= Diagnostic Procedure
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then compare keyword if has keyword in {ECG, electrocardiogram, t-wave, wave feature, QRS, RR interval, R wave, P wave, Q duration, S wave}
Keyword 1=Keyword 2
Then compare topic CUI in {diap}
Var 1 CUI=Var2 CUI
"""
if Select == "Diag" or Select=="All":
print "DIAGNOSTICS CLUSTERS"
Group1 = Group['Diagnostic Procedure']
Keywords = ['ecg', 'ekg' ,'electrocardiogram', 't-wave', 'wave feature', 'qrs', 'rr interval', 'r wave', 'p wave', 'q duration', 's wave', 'q wave', 't wave']
#cluster_keywords(DescIdx,Group1,Keywords)
cluster_synonyms(DescIdx,Group1,Keywords)
List1 = ['diap']
cluster1(DescIdx,Group1,List1)
##############################################################
##### RULE FOR Therapeutic or Preventative Procedure #######
##############################################################
"""
Prerequisite
Type= Therapeutic or Preventative Procedure
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then compare topic CUI in {topp}
Var 1 CUI=Var2 CUI
"""
if Select == "Thera" or Select=="All":
print " Therapeutic or Preventative Procedure CLUSTERS"
Group1 = Group['Therapeutic or Preventive Procedure']
List1 = ['topp']
cluster1(DescIdx,Group1,List1)
##############################################
###### RULE FOR Drinking History #######
##############################################
"""
Prerequisite
Type= Drinking History
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then
Compare keyword if has keyword in {start*, onset, first, 1st}
These variables are in the same cluster "Start"
OR
Compare keyword if has keyword in {stop*, last, quit*, end*, recency}
These variables are in the same cluster "Last use"
OR
Compare keyword if has keyword in {amount, AMT, glass, bottle, can, /day, per day, per week, /week, /wk, per month, /month, how many, how much, # of, number of, NO of, regularly, daily, regular, how often, days, times, TMS, frequency, duration, how long, years, # years, once a, FFQ}
These variables are in the same cluster "Amount"
OR
Compare keyword if has keyword in {W/D, withdrawal}
These variables are in the same cluster " Withdrawal"
"""
if Select == "Drinking" or Select=="All":
print "DRINKING HISTORY CLUSTERS"
Group1 = Group['Drinking History']
Keywords = ['start', 'onset', 'first', '1st']
cluster_synonyms(DescIdx,Group1,Keywords)
Keywords1 = ['stop', 'last', 'quit', 'end', 'recency', 'stopped']
cluster_synonyms(DescIdx,Group1,Keywords1)
Keywords2 = ['amount', 'amt', 'glass', 'bottle', 'can', '/day', 'per day', 'per week', '/week', '/wk', 'per month', '/month', 'how many', 'how much', '# of', 'number of', 'no of', 'regularly', 'daily', 'regular', 'how often', 'days', 'times', 'tms', 'frequency', 'duration', 'how long', 'years', '# years', 'once a','ffq','ordinarily']
cluster_synonyms(DescIdx,Group1,Keywords2)
Keywords3 = ['w/d', 'withdrawal']
cluster_synonyms(DescIdx,Group1,Keywords3)
##############################################
###### RULE FOR Substance Use History #######
##############################################
"""
Prerequisite
Type= Substance Use History
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then compare keyword if has keyword in {cocaine, opiate, stimulant, marijuana, pot, cannabis}
Keyword 1 = Keyword 2
Then compare topic CUI in {hops}
Var 1 CUI=Var2 CUI
"""
if Select == "Substance" or Select=="All":
print "Substance Use History CLUSTERS"
Group1 = Group['Substance Use History']
# Rule for group 1
Keywords = ['cocaine', 'opiate', 'opioid', 'heroin' , 'stimulant', 'marijuana', 'pot', 'cannabis']
cluster_keywords(DescIdx,Group1,Keywords)
Group2 = []
for item in Group1:
if not Match(Keywords,item[2]):
Group2.append(item)
List1 = ['hops']
cluster1(DescIdx,Group2,List1)
# Rule for group 2
Keywords = ['start', 'onset', 'first', '1st']
cluster_synonyms(DescIdx,Group1,Keywords)
Keywords1 = ['stop', 'last', 'quit', 'end', 'recency']
cluster_synonyms(DescIdx,Group1,Keywords1)
Keywords2 = ['amount', 'amt', '/day', 'per day', 'per week', '/week', '/wk', 'per month', '/month', 'how many', 'how much', '# of', 'number of', 'no of', 'regularly', 'daily', 'regular', 'how often', 'days', 'times', 'tms', 'frequency', 'duration', 'how long', 'years', '# years', 'once a']
cluster_synonyms(DescIdx,Group1,Keywords2)
Keywords3 = ['w/d', 'withdrawal']
cluster_synonyms(DescIdx,Group1,Keywords3)
########################################################
###### RULE FOR Daily or Recreational Activity #######
########################################################
"""
Prerequisite
Type= Daily or Recreational Activity
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then compare keyword if has keyword in {gait, walking, exercise, sports, workout, gambling, sleep, toilet, chore, stand, eat out}
Keyword 1=Keyword 2
Then compare topic CUI in {dora}
Var 1 CUI=Var2 CUI
"""
if Select == "DailyActivity" or Select=="All":
print "Daily or Recreational Activity CLUSTERS"
Group1 = Group['Daily or Recreational Activity']
#Keywords = ['gait', 'walking', 'exercise', 'sports', 'workout', 'gambling', 'sleep','toilet', 'chore', 'stand', 'eat out']
#Keywords = ['walk', 'exercis', 'sport', 'workout', 'gambl', 'chore', 'eat out', 'gait', 'stand', 'sit', 'rest']
#Keywords = ['walk', 'exercis', 'sport', 'workout', 'gambl', 'chore', 'eat out', 'gait', 'stand', 'rest', 'sleep']
# Modified on July 14
Keywords = ['gait', 'walk', 'exercis', 'sport', 'workout', 'gambl', 'sleep','chore', 'stand', 'eat out', 'eatout']
cluster_keywords(DescIdx,Group1,Keywords)
Keywords2 = ['sit', 'sits', 'sitted','sat', 'sitten', 'sitting']
cluster_synonyms_exact(DescIdx,Group1,Keywords2)
Group2 = []
for item in Group1:
if not Match(Keywords,item[2]):
Group2.append(item)
List1 = ['dora']
cluster1(DescIdx,Group1,List1)
#cluster1(DescIdx,Group2,List1)
########################################################
###### RULE FOR Eating or Nutritional Finding #######
########################################################
"""
Prerequisite
Type= Eating or Nutritional Finding
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then compare keyword if has keyword in {food, vitamin, nutrition, water}
Keyword 1=Keyword 2
Then compare topic CUI in {food}
Var 1 CUI=Var2 CUI
"""
if Select == "Eating" or Select=="All":
print "Eating or Nutritional Finding CLUSTERS"
Group1 = Group['Eating or Nutritional Finding']
Keywords = ['food', 'vitamin', 'nutrition', 'water']
cluster_keywords(DescIdx,Group1,Keywords)
List1 = ['food']
cluster1(DescIdx,Group1,List1)
########################################################
###### RULE FOR Self-Care Status #####
########################################################
"""
Prerequisite
Type= Self-care Status
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then
Compare keyword if has keywords in {bath, bathing}
These variables are in the same cluster
OR
Compare keyword if has keyword in {self-care, dress*, groom*, bathing, eating, toilet*, hygiene}
Keyword 1=Keyword 2
Note: Remove 'bathroom'
"""
if Select == "SelfCare" or Select=="All":
print "SelfCare CLUSTERS"
Group1 = Group['Self-care Status']
Keywords1 = ['bath','bathing']
cluster_synonyms_exact(DescIdx,Group1,Keywords1)
Keywords2 = ['eating', 'eat', 'eats', 'ate', 'eaten']
cluster_synonyms_exact(DescIdx,Group1,Keywords2)
Keywords = ['self-care', 'dress', 'groom', 'toilet', 'hygiene']
cluster_keywords(DescIdx,Group1,Keywords)
########################################################
###### RULE FOR Research Attribute #####
########################################################
"""
Prerequisite
Type=Research Attribute
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then
Compare keyword if has keyword in {control group, control status, case, case control }
These variables are in the same cluster
Then
Compare keyword if has keyword in {protocol}
Keyword 1=Keyword 2
Then else (for the remaining variable)
Compare Topic_CUI in {resa}
Var 1 CUI=Var 2 CUI
"""
if Select == "Research" or Select=="All":
print "Research Attribute CLUSTERS"
Group1 = Group['Research Attributes']
Keywords = ['control group', 'control status', 'case', 'case control']
cluster_synonyms(DescIdx,Group1,Keywords)
Keywords2 = ['protocol']
cluster_keywords(DescIdx,Group1,Keywords2)
WordList1 = ['control group', 'control status', 'case', 'case control', 'protocol']
Group2 = []
for item in Group1:
if not Match(WordList1,item[2]):
Group2.append(item)
List1 = ['resa']
cluster1(DescIdx,Group2,List1)
########################################################
###### RULE FOR Clinical Attribute ####
########################################################
"""
Prerequisite
Type= Clinical Attribute
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
Then
Compare keyword if has keyword in {heartbeat, heart rate, pulse, pulse deficit, pulse rate, vital sign}
These variables are in the same cluster
Then
Compare keyword if has keyword in {blood pressure, diastolic blood pressure, diastolic pressure, resting pressure, systolic blood pressure, systolic pressure, vital sign}
These variables are in the same cluster
Then
Compare keyword if has keyword in {body mass index, body weight, weight, body surface area, birth weight}
These variables are in the same cluster
Then
Compare keyword if has keyword in {body temperature, temperature, vital sign}
These variables are in the same cluster
Then
Compare keyword if has keyword in {pupil equality, pupil reactivity to light, pupil size}
These variables are in the same cluster
Then
Compare keyword if has keyword in {respiration, respiration rate, respiration depth}
These variables are in the same cluster
Then
Compare keyword if has keyword in {pulse oximetry, oxygen saturation}
These variables are in the same cluster
Then
Compare keyword if has keyword in {adiposity, basal metabolic rate, body fat distribution. chest circumference, diameter, head circumference, height, pain, perimeter, waist circumference, waist-hip ratio, gestational age}
Keyword 1=Keyword 2
"""
if Select == "Clinical" or Select=="All":
print "Clinical Attribute CLUSTERS"
Group1 = Group['Clinical Attributes']
Keywords = ['heartbeat', 'heart rate', 'pulse', 'pulse deficit', 'pulse rate', 'vital sign']
cluster_synonyms(DescIdx,Group1,Keywords)
Keywords2 = ['blood pressure', 'bp' ,'diastolic blood pressure', 'diastolic pressure', 'diastolic', 'resting pressure', 'systolic blood pressure', 'systolic pressure', 'systolic', 'vital sign']
cluster_synonyms(DescIdx,Group1,Keywords2)
Keywords3 = ['body mass index','bmi', 'body weight', 'weight', 'body surface area', 'birth weight']
cluster_synonyms(DescIdx,Group1,Keywords3)
Keywords4=['pupil equality', 'pupil reactivity to light', 'pupil size']
cluster_synonyms(DescIdx,Group1,Keywords4)
Keywords5=['respiration', 'respiration rate', 'respiration depth']
cluster_synonyms(DescIdx,Group1,Keywords5)
Keywords6=['pulse oximetry', 'oxygen saturation']
cluster_synonyms(DescIdx,Group1,Keywords6)
Keywords7=['adiposity', 'basal metabolic rate', 'body fat distribution', 'chest circumference', 'diameter', 'head circumference', 'height', 'pain', 'perimeter', 'waist circumference', 'waist-hip ratio', 'gestational age','visual acuity']
cluster_keywords(DescIdx,Group1,Keywords7)
########################################################
###### RULE FOR Healthcare Encounter #####
########################################################
"""
Healthcare Encounter
Prerequisite
Type= Healthcare Encounter
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850
Then
Compare keyword if has keyword in {ER visit, E.R. visit, emergency room, emergency department},
These variables are in same group "ER visit"
OR
Compare keyword if has keyword in {hospital*, rehabilitation}
Keyword 1=Keyword 2
"""
if Select == "Healthcare Encounter" or Select=="All":
print "Healthcare Encounter CLUSTERS"
Group1 = Group['Healthcare Encounter']
Keywords = ['er visit', 'e.r. visit', 'emergency room', 'emergency department']
cluster_synonyms(DescIdx,Group1,Keywords)
Keywords2 = ['rehabilitation','hospital']
cluster_keywords(DescIdx,Group1,Keywords2)
#Keywords3 = ['hospital']
#cluster_keywords(DescIdx,Group1,Keywords3)
"""
Check if Str1 contain a word in the wordlist
"""
def Match(WordList,Str1):
for item in WordList:
if Str1.lower().find(item.lower())>=0:
return 1
return 0
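# Added examples (not in the original source): Match() does a case-insensitive substring
# test of every keyword against the whole string.
#   Match(['smoke', 'cigar'], 'Current Smoker')   -> 1   ('smoke' is a substring)
#   Match(['smoke'], 'No history of tobacco use') -> 0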
"""
Check if Str1 contain a word in the wordlist - tokenized by space
Cannot match first word since the case of "CIGS/day" where '/day' is a keyword matched
"""
def Match2(WordList,Str1):
Str1List = Str1.split()
for item1 in Str1List:
for item in WordList:
if item1.lower().find(item.lower())>=0:
return 1
return 0
"""
Check if Str1 contain a word in the wordlist - tokenized by space
"""
def MatchStrict(WordList,Str1):
#print "**********"
#print WordList
#print Str1
#print "========="
Str1List = Str1.lower().split()
Str1List2 = []
for xitem in Str1List:
# Tokenize item
if xitem.find('/')>=0:
item1 = xitem.split('/')
elif xitem.find('-')>=0:
item1 = xitem.split('-')
else:
item1 = xitem.split()
for item2 in item1:
item3 = item2.strip().strip('?').strip('!').strip(',').strip('.').strip(';').strip('-').strip(':')
Str1List2.append(item3)
#print Str1
#print Str1List2
for item in WordList:
if item in Str1List2:
return 1
return 0
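# Illustrative examples (not part of the original script) of strict token matching:
#   MatchStrict(['stop'], 'Did you stop smoking?')  -> 1   (tokens are cleaned of punctuation, so 'stop' is an exact token)
#   MatchStrict(['stop'], 'nonstop flights taken')  -> 0   (no cleaned token equals 'stop' exactly)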
"""
Cluster algorithms -- for other except Demographics: first compare STT, then compare CUI
"""
def cluster1(DescIdx,Group1,List1):
Cluster = {}
for i in range(0,len(Group1)):
for j in range(i+1,len(Group1)):
SOICUI = Group1[i][DescIdx + 10]
#if SOICUI.find('C0681850')>=0 or SOICUI.find('C0030705')>=0 or SOICUI.find('C0679646')>=0:
if 2>1:
"""
If the same STT - DescIdx + 8
"""
# =================================
# Firstly, check Semantic
# =================================
# Check the case of multiple categories
tempL1 = Group1[i][DescIdx + 8].strip().replace(',',';').split(';')
tempL2 = Group1[j][DescIdx + 8].strip().replace(',',';').split(';')
TopicSTT1 = set(tempL1)
TopicSTT2 = set(tempL2)
InterTopicSTT = list(TopicSTT1&TopicSTT2)
#print InterTopicSTT
JoinListL = list(set(InterTopicSTT)&set(List1))
JoinList = set(InterTopicSTT)&set(List1)
JoinListStr = ':'.join(list(set(InterTopicSTT)&set(List1))[0:])
#print JoinList
if len(JoinList)>0:
# =================================
# Secondly, check CUI
# =================================
##### CONVERT ITEM 1
TopicCUI1L = Group1[i][DescIdx + 7].strip().split(';')
TopicSTT1L = Group1[i][DescIdx + 8].strip().split(';')
temp1=[]
temp2=[]
for item in TopicCUI1L:
idx = TopicCUI1L.index(item)
#print JoinListL
#print TopicSTT1L[idx]
if checkItem(TopicSTT1L[idx],JoinListL):
temp1.append(item)
temp2.append(JoinListL[0])
TopicCUI1L = temp1
TopicSTT1L = temp2
#print "SECOND PHASE"
#print TopicCUI1L
#print TopicSTT1L
##### CONVERT ITEM 2
TopicCUI2L = Group1[j][DescIdx + 7].strip().split(';')
TopicSTT2L = Group1[j][DescIdx + 8].strip().split(';')
temp1=[]
temp2=[]
for item in TopicCUI2L:
idx = TopicCUI2L.index(item)
if checkItem(TopicSTT2L[idx],JoinListL):
temp1.append(item)
temp2.append(JoinListL[0])
TopicCUI2L = temp1
TopicSTT2L = temp2
#print TopicCUI2L
#print TopicSTT2L
### DO JOINING
InterTopicCUI = list(set(TopicCUI1L)&set(TopicCUI2L))
InterTopicCUI1 =':'.join(InterTopicCUI[0:])
#print InterTopicCUI
if len(InterTopicCUI)>0:
if not Cluster.has_key(InterTopicCUI1):
Cluster[InterTopicCUI1]=[Group1[i],Group1[j]]
else:
if not Group1[i] in Cluster[InterTopicCUI1]:
Cluster[InterTopicCUI1].append(Group1[i])
if not Group1[j] in Cluster[InterTopicCUI1]:
Cluster[InterTopicCUI1].append(Group1[j])
for key in Cluster.keys():
print key
for item in Cluster[key]:
#print item[0] + '\t' + item[DescIdx] + '\t' + item[DescIdx + 7] + '\t' + item[DescIdx + 8]
print '\t' + item[0] + '\t' + item[DescIdx] + '\t' + item[DescIdx + 7] + '\t' + item[DescIdx + 8]
print "==============="
"""
Cluster algorithms -- for other except Demographics: first compare STT, then compare CUI
Exclude several CUIs
1 cluster is 1 CUIs, remove combination
"""
def cluster1_exclude_CUIs(DescIdx,Group1,List1,ExCUIs):
Cluster = {}
for i in range(0,len(Group1)):
for j in range(i+1,len(Group1)):
SOICUI = Group1[i][DescIdx + 10]
#if SOICUI.find('C0681850')>=0 or SOICUI.find('C0030705')>=0 or SOICUI.find('C0679646')>=0:
if 2>1:
"""
If the same STT - DescIdx + 8
"""
# =================================
# Firstly, check Semantic
# =================================
# Check the case of multiple categories
tempL1 = Group1[i][DescIdx + 8].strip().replace(',',';').split(';')
tempL2 = Group1[j][DescIdx + 8].strip().replace(',',';').split(';')
TopicSTT1 = set(tempL1)
TopicSTT2 = set(tempL2)
InterTopicSTT = list(TopicSTT1&TopicSTT2)
#print InterTopicSTT
JoinListL = list(set(InterTopicSTT)&set(List1))
JoinList = set(InterTopicSTT)&set(List1)
JoinListStr = ':'.join(list(set(InterTopicSTT)&set(List1))[0:])
#print JoinList
if len(JoinList)>0:
# =================================
# Secondly, check CUI
# =================================
##### CONVERT ITEM 1
TopicCUI1L = Group1[i][DescIdx + 7].strip().split(';')
TopicSTT1L = Group1[i][DescIdx + 8].strip().split(';')
temp1=[]
temp2=[]
for item in TopicCUI1L:
idx = TopicCUI1L.index(item)
#print JoinListL
#print TopicSTT1L[idx]
if checkItem(TopicSTT1L[idx],JoinListL):
temp1.append(item)
temp2.append(JoinListL[0])
TopicCUI1L = temp1
TopicSTT1L = temp2
#print "SECOND PHASE"
#print TopicCUI1L
#print TopicSTT1L
##### CONVERT ITEM 2
TopicCUI2L = Group1[j][DescIdx + 7].strip().split(';')
TopicSTT2L = Group1[j][DescIdx + 8].strip().split(';')
temp1=[]
temp2=[]
for item in TopicCUI2L:
idx = TopicCUI2L.index(item)
if checkItem(TopicSTT2L[idx],JoinListL):
temp1.append(item)
temp2.append(JoinListL[0])
TopicCUI2L = temp1
TopicSTT2L = temp2
#print TopicCUI2L
#print TopicSTT2L
### DO JOINING
InterTopicCUI = list(set(TopicCUI1L)&set(TopicCUI2L))
InterTopicCUI1 =':'.join(InterTopicCUI[0:])
#print "************"
#print TopicCUI1L
#print TopicCUI2L
#print InterTopicCUI
#print "************"
if len(InterTopicCUI)>0:
for xtem in InterTopicCUI:
if not xtem in ExCUIs:
if not Cluster.has_key(xtem):
Cluster[xtem]=[Group1[i],Group1[j]]
else:
if not Group1[i] in Cluster[xtem]:
Cluster[xtem].append(Group1[i])
if not Group1[j] in Cluster[xtem]:
Cluster[xtem].append(Group1[j])
for key in Cluster.keys():
print key
for item in Cluster[key]:
#print item[0] + '\t' + item[DescIdx] + '\t' + item[DescIdx + 7] + '\t' + item[DescIdx + 8]
print '\t' + item[0] + '\t' + item[DescIdx] + '\t' + item[DescIdx + 7] + '\t' + item[DescIdx + 8]
print "==============="
"""
Check whether Str1 contains any item from the given list
"""
def checkItem(Str1, List):
for item in List:
if Str1.find(item)>=0:
return 1
return 0
"""
Cluster algorithms -- for demographics
"""
def cluster(DescIdx,Group1):
##################################
##### RULE FOR DEMOGRAPHICS ######
##################################
"""
Demographics
Type=Demographics
AND
SOI= {Study Subject, Participant, Patient}={C0030705, C0679646, C0681850}
AND
Topic_CUI (Var1) = Topic_CUI (Var2)
"""
#Group1 = Group['Demographics']
Cluster = {}
for i in range(0,len(Group1)):
for j in range(i+1,len(Group1)):
SOICUI = Group1[i][DescIdx + 10]
if SOICUI.find('C0681850')>=0 or SOICUI.find('C0030705')>=0 or SOICUI.find('C0679646')>=0:
"""
If the same Theme - DescIdx + 3
"""
Theme1 = set(Group1[i][DescIdx + 3].strip().split(';'))
Theme2 = set(Group1[j][DescIdx + 3].strip().split(';'))
InterTheme = list(Theme1&Theme2)
InterTheme1 =':'.join(InterTheme[0:])
if len(InterTheme)>0:
if not Cluster.has_key(InterTheme1):
Cluster[InterTheme1]=[Group1[i],Group1[j]]
else:
if not Group1[i] in Cluster[InterTheme1]:
Cluster[InterTheme1].append(Group1[i])
if not Group1[j] in Cluster[InterTheme1]:
Cluster[InterTheme1].append(Group1[j])
for key in Cluster.keys():
print key
for item in Cluster[key]:
print '\t' + item[0] + '\t' + item[DescIdx] + '\t' + item[DescIdx + 3]
print "==============="
"""
Cluster algorithms -- for comparison of keywords instead
"""
def cluster_keywords(DescIdx,Group1,WordList):
Cluster = {}
for i in range(0,len(Group1)):
SOICUI = Group1[i][DescIdx + 10]
#if SOICUI.find('C0681850')>=0 or SOICUI.find('C0030705')>=0 or SOICUI.find('C0679646')>=0:
if 2>1:
"""
Check keywords
"""
Desc = Group1[i][DescIdx]
DescNorm = Group1[i][DescIdx + 1]
for item in WordList:
if DescNorm.find(item)>=0:
if not Cluster.has_key(item):
Cluster[item] = [Group1[i]]
else:
if not Group1[i] in Cluster[item]:
Cluster[item].append(Group1[i])
for key in Cluster.keys():
if len(Cluster[key])>1:
print key
for item in Cluster[key]:
#print "\t" + item[DescIdx]
print "\t" + item[0] + '\t' + item[DescIdx]
print "==============="
"""
Synonym cluster algorithms
"""
def cluster_synonyms(DescIdx,Group1,SynonymList):
Cluster = {}
KeySyn = SynonymList[0]
for i in range(0,len(Group1)):
SOICUI = Group1[i][DescIdx + 10]
#if SOICUI.find('C0681850')>=0 or SOICUI.find('C0030705')>=0 or SOICUI.find('C0679646')>=0:
# Change SOI check
if 2>1:
"""
Check synonyms
"""
Desc = Group1[i][DescIdx]
DescNorm = Group1[i][DescIdx + 1]
# Change back to DescNorm, change back on July 19 2014
DescNorm = DescNorm.lower()
# Deal with exception cases: "end*"
DescNorm1 = DescNorm.split()
EndList = ['end', 'ends', 'ending', 'ended','quit','quits','quitted','quitting','recency','stop','stops','stopped','stopping']
LastList = ['last','lasts','lasted', 'lasting']
# Changed in July 19, 2014
# For class "stop"
if 'stop' in SynonymList:
# CHECK ENDLIST first
if len(set(EndList).intersection(DescNorm1))>0:
KeySyn='stop'
if not Cluster.has_key(KeySyn):
Cluster[KeySyn] = [Group1[i]]
else:
if not Group1[i] in Cluster[KeySyn]:
Cluster[KeySyn].append(Group1[i])
# Deal with exception cases: "last year/hour/month etc"
# RULE:
# except last {year, week, hour, number +hr, number+days, number +weeks, number +years, exam, night, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}
# Changed on July 19 2014
# If contain term "last"
elif len(set(LastList).intersection(DescNorm1))>0:
# case of last + time
DescNorm1 = DescNorm.split()
Lidx = DescNorm1.index('last')
Next = ''
if len(DescNorm1)>Lidx+2:
Next = DescNorm1[Lidx+2]
ExList = ['last year','last week','last hour','last exam', 'last night', 'last monday', 'last tuesday', 'last wednesday', 'last thursday', 'last friday', 'last saturday', 'last sunday']
ExList2 = ['hr','hours','days','weeks','years']
#print DescNorm
#print Next
#print Match(ExList, DescNorm)
#print Match(ExList2, Next)
# RULE: Exclude "last" + {years,month, number + days}
# Ex. IF NOT "last year" AND NOT "last 30 days"
if Match(ExList, DescNorm)==0 and Match(ExList2, Next)==0 :
KeySyn='stop'
if not Cluster.has_key(KeySyn):
Cluster[KeySyn] = [Group1[i]]
else:
if not Group1[i] in Cluster[KeySyn]:
Cluster[KeySyn].append(Group1[i])
# For the remaining cases -- not "stop"
else:
KeySyn = SynonymList[0]
ItemList = SynonymList
DescNorm1 = DescNorm.split()
#print DescNorm1
#print ItemList
#print set(ItemList).intersection(DescNorm1)
for item in SynonymList:
if DescNorm.find(item)>=0:
#if len(set(ItemList).intersection(DescNorm1))>0:
if not Cluster.has_key(KeySyn):
Cluster[KeySyn] = [Group1[i]]
else:
if not Group1[i] in Cluster[KeySyn]:
#print Group1[i]
Cluster[KeySyn].append(Group1[i])
#print Cluster
for key in Cluster.keys():
if len(Cluster[key])>1:
print key
for item in Cluster[key]:
#print "\t" + item[DescIdx]
print "\t" + item[0] + '\t' + item[DescIdx]
print "==============="
"""
Synonym cluster algorithms -- cluster with exact keyword matching
"""
def cluster_synonyms_exact(DescIdx,Group1,SynonymList):
Cluster = {}
KeySyn = SynonymList[0]
for i in range(0,len(Group1)):
SOICUI = Group1[i][DescIdx + 10]
#if SOICUI.find('C0681850')>=0 or SOICUI.find('C0030705')>=0 or SOICUI.find('C0679646')>=0:
if 2>1:
"""
Check synonyms
"""
Desc = Group1[i][DescIdx]
DescNorm = Group1[i][DescIdx + 1]
## Match only with Desc, not DescNorm
#DescNorm = Desc.lower()
# Change back to DescNorm - noted on July 19, 2014
DescNorm = DescNorm.lower()
if MatchStrict(SynonymList, DescNorm):
if not Cluster.has_key(KeySyn):
Cluster[KeySyn] = [Group1[i]]
else:
if not Group1[i] in Cluster[KeySyn]:
Cluster[KeySyn].append(Group1[i])
for key in Cluster.keys():
if len(Cluster[key])>1:
print key
for item in Cluster[key]:
#print "\t" + item[DescIdx]
print "\t" + item[0] + '\t' + item[DescIdx]
print "==============="
"""
Usage function
"""
def usage():
print """"python [prog] -i <file> -d <number> -t <text> -o <selection>
where input can be one of those:
-i : input file
    -d : deliminator number, e.g., 0, 5, 6. Default is 0, which means no meta-data
OR
-t : text input
-o : select which types to display
All - Display all
MedHist - Medical History
Demo - Demographics
LabTest - Lab Test
Med - Medication
DrHist - Drinking History
Smoking - Smoking History
Mental - Mental or Emotional Finding
Activ - Healthcare Activity Finding
Diag - Diagnostic Procedure
         Thera - Therapeutic or Preventive Procedure
MentalHealth - Mental or Emotional Finding
SelfCare - Self-Care Status
Research - Research Attribute
Clinical - Clinical Attribute
"""
def main():
try:
options,remainder = getopt.getopt(sys.argv[1:], 'i:d:t:o:hdv', ['input=','deliminator=','text=','option=','help','debug','version'])
except getopt.GetoptError:
usage()
sys.exit(2)
    delim = 0 # Default value, meaning the original PhenDesc starts from column 0
text_inp = ''
select = 'All'
for opt,arg in options:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt in ('-i', '--input'):
Qinput = arg
elif opt in ('-d', '--deliminator'):
delim = int(arg)
elif opt in ('-t', '--text'):
text_inp = arg
elif opt in ('-o', '--option'):
select = arg
readinput2(Qinput,delim,select)
if __name__=="__main__":
main()
| 30.233542 | 357 | 0.618461 |
f74c615471184ac9ab3bf5511e84c837815443df | 930 | py | Python | odfdo/version.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 18 | 2018-04-19T08:30:48.000Z | 2022-02-14T11:00:27.000Z | odfdo/version.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 15 | 2018-04-22T00:52:41.000Z | 2021-07-05T10:16:38.000Z | odfdo/version.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 6 | 2018-04-22T00:14:12.000Z | 2021-12-06T01:42:07.000Z | # Copyright 2018-2020 Jérôme Dumonteil
# Copyright (c) 2009-2012 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): jerome.dumonteil@gmail.com
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: Jerome Dumonteil <jerome.dumonteil@itaapy.com>
"""Version number of the package
"""
__version__ = "3.3.1"
| 38.75 | 74 | 0.755914 |
f74c7da14d5d5d8737fb278ca6bf9d0741d533e0 | 4,235 | py | Python | mcarch/views/mods.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | mcarch/views/mods.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | mcarch/views/mods.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, jsonify, request, url_for, redirect, flash, \
abort, current_app as app
from mcarch.model.mod import Mod, ModAuthor, ModVersion, GameVersion
from mcarch.model.mod.draft import DraftMod
from mcarch.model.mod.logs import LogMod, gen_diffs
from mcarch.model.user import roles
from mcarch.login import login_required, cur_user, insecure_cur_user
from mcarch.jsonschema import ModSchema, ModAuthorSchema, GameVersionSchema
from mcarch.util.minecraft import key_mc_version
from mcarch.app import db
modbp = Blueprint('mods', __name__, template_folder="templates")
@modbp.route("/mods")
def browse():
requested_drafts = request.args.get('drafts', type=bool)
drafts_only = False
if requested_drafts:
user = cur_user()
if user:
drafts_only = user.has_role(roles.archivist)
if drafts_only:
mod_query = DraftMod.query
else:
mod_query = Mod.query
by_author = request.args.get('author')
by_gvsn = request.args.get('gvsn')
# list of filters to be listed on the page
filters = []
if by_author:
filters.append(('author', by_author))
mod_query = mod_query.join(ModAuthor, Mod.authors).filter(ModAuthor.name == by_author)
if by_gvsn:
filters.append(('gvsn', by_gvsn))
mod_query = mod_query.join(ModVersion) \
.join(GameVersion, ModVersion.game_vsns) \
.filter(GameVersion.name == by_gvsn)
if drafts_only:
filters.append(('drafts', True))
mods = mod_query.all()
return render_template("mods/browse.html", mods=mods, filters=filters, gvsn=by_gvsn)
@modbp.route("/mods/<slug>")
def mod_page(slug):
mod = Mod.query.filter_by(slug=slug).first_or_404()
vsns = mod.vsns_by_game_vsn()
by_gvsn = request.args.get('gvsn')
if by_gvsn:
vsns = { by_gvsn: vsns.get(by_gvsn) }
return render_template("mods/mod.html", mod=mod, vsns_grouped=vsns, by_gvsn=by_gvsn)
@modbp.route("/mods/<slug>.json")
def mod_page_json(slug):
mod = Mod.query.filter_by(slug=slug).first_or_404()
return jsonify(ModSchema().dump(mod))
@modbp.route("/authors")
def authors():
authors = ModAuthor.query.all()
return render_template('mods/authors.html', authors=authors)
@modbp.route("/authors.json")
def authors_json():
authors = ModAuthor.query.all()
return jsonify([{"id": a.id, "name": a.name} for a in authors])
@modbp.route("/gamevsns")
def gamevsns():
gamevsns = sorted(GameVersion.query.all(), key=lambda a: key_mc_version(a.name), reverse=True)
return render_template('mods/gamevsns.html', gamevsns=gamevsns)
@modbp.route("/gamevsns.json")
def gamevsns_json():
gamevsns = sorted(GameVersion.query.all(), key=lambda a: key_mc_version(a.name), reverse=True)
return jsonify([{"id": g.id, "name": g.name} for g in gamevsns])
@modbp.route("/mods/<slug>/history")
@login_required(role=roles.archivist)
def mod_history(slug):
mod = Mod.query.filter_by(slug=slug).first_or_404()
changes = gen_diffs(mod)
return render_template("mods/history.html", mod=mod, changes=changes)
@modbp.route("/mods/<slug>/history/<index>")
@login_required(role=roles.archivist)
def mod_revision(slug, index):
mod = Mod.query.filter_by(slug=slug).first_or_404()
rev = LogMod.query.filter_by(cur_id=mod.id, index=index).first_or_404()
vsns = rev.vsns_by_game_vsn()
return render_template("mods/mod.html", mod=rev, rev=rev, vsns_grouped=vsns)
@modbp.route("/mods/<slug>/history/<index>/revert", methods=['GET', 'POST'])
@login_required(role=roles.moderator, pass_user=True)
def revert_mod(user, slug, index):
mod = Mod.query.filter_by(slug=slug).first_or_404()
revto = LogMod.query.filter_by(index=index, cur_id=mod.id).first_or_404()
if request.method == 'POST':
mod.revert_to(revto)
db.session.commit()
mod.log_change(user=user)
db.session.commit()
flash('Mod reverted to revision {}'.format(index))
return redirect(url_for('mods.mod_page', slug=slug))
else:
diff = mod.diff(revto)
return render_template("mods/revert_confirm.html", mod=mod, revto=revto, diff=diff)
| 36.508621 | 98 | 0.691854 |
f74cab154d9a09a50918d0af2a9ef62841faa0c8 | 1,897 | py | Python | scripts/dataToParquet.py | AeRabelais/bdt_pipeline | 35d0cc3f7ada35e082c384d0755916605daa5feb | [
"MIT"
] | null | null | null | scripts/dataToParquet.py | AeRabelais/bdt_pipeline | 35d0cc3f7ada35e082c384d0755916605daa5feb | [
"MIT"
] | null | null | null | scripts/dataToParquet.py | AeRabelais/bdt_pipeline | 35d0cc3f7ada35e082c384d0755916605daa5feb | [
"MIT"
] | null | null | null | """
@Title: dataToParquet.py
@author: Ashia Lewis
GOAL: Create and update the parquet files for the air and soil data, separately.
"""
import os
import glob
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
#CODE TO BE USED FOR THE BATCH DATA
"""
#file directories for the air and soil files
air_dir = r"D:\sample_biodiversitree\data\export_data\air_data"
soil_dir = r"D:\sample_biodiversitree\scripts\data\export_data\soil_data"
#all_air_files = glob.glob(air_dir + '/**/*.csv', recursive=True)
all_soil_files = glob.glob(soil_dir + '/**/*.csv', recursive=True)
#air_data = pd.concat((pd.read_csv(f) for f in all_air_files ))
#air_data.to_parquet('air_data.parquet')
#need to look at soil's clean up job
soil_data = pd.concat((pd.read_csv(f) for f in all_soil_files ))
soil_data.to_parquet('soil_data.parquet')
"""
#CODE TO BE USED IN THE ACTUAL PIPELINE
# file directories for the air and soil files
air_dir = r"D:\sample_biodiversitree\data\export_data\air_data"
soil_dir = r"D:\sample_biodiversitree\data\export_data\soil_data"
#concatentate all of files' data
all_air_files = glob.glob(air_dir + '/**/*.csv', recursive=True)
all_soil_files = glob.glob(soil_dir + '/**/*.csv', recursive=True)
#put the data in a dataframe
air_data = pd.concat((pd.read_csv(f) for f in all_air_files))
soil_data = pd.concat((pd.read_csv(f) for f in all_soil_files))
#add data to existing parquet files
air_table = pa.Table.from_pandas(air_data)
soil_table = pa.Table.from_pandas(soil_data)
air_writer = pq.ParquetWriter('air_data.parquet', air_table.schema)
air_writer.write_table(table = air_table)
if air_writer:
air_writer.close()
soil_writer = pq.ParquetWriter('soil_data.parquet', soil_table.schema)
soil_writer.write_table(table = soil_table)
if soil_writer:
soil_writer.close()
| 27.1 | 81 | 0.727992 |
f74cee003c3c4f2391f62fefc98bc111aa961f18 | 29,998 | py | Python | synapseclient/utils.py | zimingd/synapsePythonClient | 6bddd4d9de2f06f6f79d8ce814ac57970d6bebee | [
"Apache-2.0"
] | null | null | null | synapseclient/utils.py | zimingd/synapsePythonClient | 6bddd4d9de2f06f6f79d8ce814ac57970d6bebee | [
"Apache-2.0"
] | null | null | null | synapseclient/utils.py | zimingd/synapsePythonClient | 6bddd4d9de2f06f6f79d8ce814ac57970d6bebee | [
"Apache-2.0"
] | null | null | null | """
*****************
Utility Functions
*****************
Utility functions useful in the implementation and testing of the Synapse client.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import implements_iterator
from builtins import str
import six
try:
from urllib.parse import urlparse
from urllib.parse import urlencode
from urllib.parse import parse_qs
from urllib.parse import urlunparse
from urllib.parse import ParseResult
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urlparse import parse_qs
from urlparse import urlunparse
from urlparse import ParseResult
from urlparse import urlsplit
try:
import urllib.request, urllib.error
except ImportError:
import urllib
import os, sys
import hashlib, re
import cgi
import errno
import inspect
import random
import requests
import collections
import tempfile
import platform
import functools
import threading
import uuid
import importlib
from datetime import datetime as Datetime
from datetime import date as Date
from datetime import timedelta
from numbers import Number
UNIX_EPOCH = Datetime(1970, 1, 1, 0, 0)
ISO_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z"
ISO_FORMAT_MICROS = "%Y-%m-%dT%H:%M:%S.%fZ"
GB = 2**30
MB = 2**20
KB = 2**10
BUFFER_SIZE = 8*KB
def md5_for_file(filename, block_size=2*MB):
"""
Calculates the MD5 of the given file. See `source <http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python>`_.
:param filename: The file to read in
:param block_size: How much of the file to read in at once (bytes).
Defaults to 2 MB
:returns: The MD5
"""
md5 = hashlib.md5()
with open(filename,'rb') as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return(md5)
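# Illustrative usage (not part of the original module; assumes 'data.bin' exists locally):
#   checksum = md5_for_file('data.bin').hexdigest()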
def download_file(url, localFilepath=None):
"""
Downloads a remote file.
:param localFilePath: May be None, in which case a temporary file is created
:returns: localFilePath
"""
f = None
try:
if localFilepath:
dir = os.path.dirname(localFilepath)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(localFilepath, 'wb')
else:
f = tempfile.NamedTemporaryFile(delete=False)
localFilepath = f.name
r = requests.get(url, stream=True)
toBeTransferred = float(r.headers['content-length'])
for nChunks, chunk in enumerate(r.iter_content(chunk_size=1024*10)):
if chunk:
f.write(chunk)
printTransferProgress(nChunks*1024*10, toBeTransferred)
finally:
if f:
f.close()
printTransferProgress(toBeTransferred, toBeTransferred)
return localFilepath
def extract_filename(content_disposition_header, default_filename=None):
"""
Extract a filename from an HTTP content-disposition header field.
See `this memo <http://tools.ietf.org/html/rfc6266>`_
and `this package <http://pypi.python.org/pypi/rfc6266>`_
for cryptic details.
"""
if not content_disposition_header:
return default_filename
value, params = cgi.parse_header(content_disposition_header)
return params.get('filename', default_filename)
def extract_user_name(profile):
"""
Extract a displayable user name from a user's profile
"""
if 'userName' in profile and profile['userName']:
return profile['userName']
elif 'displayName' in profile and profile['displayName']:
return profile['displayName']
else:
if 'firstName' in profile and profile['firstName'] and 'lastName' in profile and profile['lastName']:
return profile['firstName'] + ' ' + profile['lastName']
elif 'lastName' in profile and profile['lastName']:
return profile['lastName']
elif 'firstName' in profile and profile['firstName']:
return profile['firstName']
else:
return str(profile.get('id', 'Unknown-user'))
def _get_from_members_items_or_properties(obj, key):
try:
if hasattr(obj, key):
return getattr(obj, key)
if hasattr(obj, 'properties') and key in obj.properties:
return obj.properties[key]
except (KeyError, TypeError, AttributeError): pass
try:
if key in obj:
return obj[key]
elif 'properties' in obj and key in obj['properties']:
return obj['properties'][key]
except (KeyError, TypeError): pass
return None
## TODO: what does this do on an unsaved Synapse Entity object?
def id_of(obj):
"""
Try to figure out the Synapse ID of the given object.
:param obj: May be a string, Entity object, or dictionary
:returns: The ID or throws an exception
"""
if isinstance(obj, six.string_types):
return str(obj)
if isinstance(obj, Number):
return str(obj)
id_attr_names = ['id', 'ownerId', 'tableId'] #possible attribute names for a synapse Id
for attribute_name in id_attr_names:
syn_id = _get_from_members_items_or_properties(obj, attribute_name)
if syn_id is not None:
return str(syn_id)
raise ValueError('Invalid parameters: couldn\'t find id of ' + str(obj))
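# Illustrative examples (not part of the original module):
#   id_of('syn12345')         -> 'syn12345'
#   id_of(12345)              -> '12345'
#   id_of({'id': 'syn999'})   -> 'syn999'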
def is_in_path(id, path):
"""Determines whether id is in the path as returned from /entity/{id}/path
:param id: synapse id string
:param path: object as returned from '/entity/{id}/path'
:returns: True or False
"""
return id in [item['id'] for item in path['path']]
def get_properties(entity):
"""Returns the dictionary of properties of the given Entity."""
return entity.properties if hasattr(entity, 'properties') else entity
def is_url(s):
"""Return True if the string appears to be a valid URL."""
if isinstance(s, six.string_types):
try:
url_parts = urlsplit(s)
## looks like a Windows drive letter?
if len(url_parts.scheme)==1 and url_parts.scheme.isalpha():
return False
if url_parts.scheme == 'file' and bool(url_parts.path):
return True
return bool(url_parts.scheme) and bool(url_parts.netloc)
except Exception as e:
return False
return False
def as_url(s):
"""Tries to convert the input into a proper URL."""
url_parts = urlsplit(s)
## Windows drive letter?
if len(url_parts.scheme)==1 and url_parts.scheme.isalpha():
return 'file:///%s' % str(s).replace("\\","/")
if url_parts.scheme:
return url_parts.geturl()
else:
return 'file://%s' % str(s)
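# Illustrative examples (not part of the original module):
#   is_url('http://example.com/data')   -> True
#   is_url('C:\\Users\\me\\file.txt')   -> False  (a Windows drive letter is not a URL scheme)
#   as_url('/tmp/data.csv')             -> 'file:///tmp/data.csv'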
def guess_file_name(string):
"""Tries to derive a filename from an arbitrary string."""
path = normalize_path(urlparse(string).path)
tokens = [x for x in path.split('/') if x != '']
if len(tokens) > 0:
return tokens[-1]
# Try scrubbing the path of illegal characters
if len(path) > 0:
path = re.sub(r"[^a-zA-Z0-9_.+() -]", "", path)
if len(path) > 0:
return path
raise ValueError("Could not derive a name from %s" % string)
def normalize_path(path):
"""Transforms a path into an absolute path with forward slashes only."""
if path is None:
return None
return re.sub(r'\\', '/', os.path.normcase(os.path.abspath(path)))
def equal_paths(path1, path2):
"""
Compare file paths in a platform neutral way
"""
return normalize_path(path1) == normalize_path(path2)
def file_url_to_path(url, verify_exists=False):
"""
Convert a file URL to a path, handling some odd cases around Windows paths.
:param url: a file URL
:param verify_exists: If true, return an populated dict only if the
resulting file path exists on the local file system.
:returns: a path or None if the URL is not a file URL.
"""
parts = urlsplit(url)
if parts.scheme=='file' or parts.scheme=='':
path = parts.path
## A windows file URL, for example file:///c:/WINDOWS/asdf.txt
## will get back a path of: /c:/WINDOWS/asdf.txt, which we need to fix by
## lopping off the leading slash character. Apparently, the Python developers
## think this is not a bug: http://bugs.python.org/issue7965
if re.match(r'\/[A-Za-z]:', path):
path = path[1:]
if os.path.exists(path) or not verify_exists:
return path
return None
def is_same_base_url(url1, url2):
"""Compares two urls to see if they are the same excluding up to the base path
:param url1: a URL
:param url2: a second URL
:returns: Boolean
"""
url1 = urlsplit(url1)
url2 = urlsplit(url2)
return (url1.scheme==url2.scheme and
url1.hostname==url2.hostname)
def is_synapse_id(obj):
"""If the input is a Synapse ID return it, otherwise return None"""
if isinstance(obj, six.string_types):
m = re.match(r'(syn\d+)', obj)
if m:
return m.group(1)
return None
def _is_date(dt):
"""Objects of class datetime.date and datetime.datetime will be recognized as dates"""
return isinstance(dt,Date) or isinstance(dt,Datetime)
def _to_list(value):
"""Convert the value (an iterable or a scalar value) to a list."""
if isinstance(value, collections.Iterable) and not isinstance(value, six.string_types):
return list(value)
else:
return [value]
def _to_iterable(value):
"""Convert the value (an iterable or a scalar value) to an iterable."""
if isinstance(value, six.string_types):
return (value,)
if isinstance(value, collections.Iterable):
return value
return (value,)
def make_bogus_data_file(n=100, seed=None):
"""
Makes a bogus data file for testing.
It is the caller's responsibility to clean up the file when finished.
:param n: How many random floating point numbers to be written into the file, separated by commas
:param seed: Random seed for the random numbers
:returns: The name of the file
"""
if seed is not None:
random.seed(seed)
data = [random.gauss(mu=0.0, sigma=1.0) for i in range(n)]
f = tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
try:
f.write(", ".join(str(n) for n in data))
f.write("\n")
finally:
f.close()
return normalize_path(f.name)
def make_bogus_binary_file(n=1*MB, filepath=None, printprogress=False):
"""
Makes a bogus binary data file for testing.
It is the caller's responsibility to clean up the file when finished.
:param n: How many bytes to write
:returns: The name of the file
"""
with open(filepath, 'wb') if filepath else tempfile.NamedTemporaryFile(mode='wb', suffix=".dat", delete=False) as f:
if not filepath:
filepath = f.name
progress = 0
remaining = n
while remaining > 0:
buff_size = int(min(remaining, 1*MB))
f.write(os.urandom(buff_size))
remaining -= buff_size
if printprogress:
progress += buff_size
printTransferProgress(progress, n, 'Generated ', filepath)
return normalize_path(filepath)
def to_unix_epoch_time(dt):
"""
Convert either `datetime.date or datetime.datetime objects
<http://docs.python.org/2/library/datetime.html>`_ to UNIX time.
"""
if type(dt) == Date:
return (dt - UNIX_EPOCH.date()).total_seconds() * 1000
return int((dt - UNIX_EPOCH).total_seconds() * 1000)
def to_unix_epoch_time_secs(dt):
"""
Convert either `datetime.date or datetime.datetime objects
<http://docs.python.org/2/library/datetime.html>`_ to UNIX time.
"""
if type(dt) == Date:
return (dt - UNIX_EPOCH.date()).total_seconds()
return (dt - UNIX_EPOCH).total_seconds()
def from_unix_epoch_time_secs(secs):
"""Returns a Datetime object given milliseconds since midnight Jan 1, 1970."""
if isinstance(secs, six.string_types):
secs = float(secs)
# utcfromtimestamp() fails for negative values (dates before 1970-1-1) on Windows
# so, here's a hack that enables ancient events, such as Chris's birthday to be
# converted from milliseconds since the UNIX epoch to higher level Datetime objects. Ha!
if platform.system()=='Windows' and secs < 0:
mirror_date = Datetime.utcfromtimestamp(abs(secs))
return (UNIX_EPOCH - (mirror_date-UNIX_EPOCH))
return Datetime.utcfromtimestamp(secs)
def from_unix_epoch_time(ms):
"""Returns a Datetime object given milliseconds since midnight Jan 1, 1970."""
if isinstance(ms, six.string_types):
ms = float(ms)
return from_unix_epoch_time_secs(ms/1000.0)
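# Illustrative round trip (not part of the original module):
#   to_unix_epoch_time(Datetime(2001, 1, 1))   -> 978307200000
#   from_unix_epoch_time(978307200000)         -> datetime.datetime(2001, 1, 1, 0, 0)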
def datetime_to_iso(dt, sep="T"):
## Round microseconds to milliseconds (as expected by older clients)
## and add back the "Z" at the end.
## see: http://stackoverflow.com/questions/30266188/how-to-convert-date-string-to-iso8601-standard
fmt = "{time.year:04}-{time.month:02}-{time.day:02}{sep}{time.hour:02}:{time.minute:02}:{time.second:02}.{millisecond:03}{tz}"
if dt.microsecond >= 999500:
dt -= timedelta(microseconds=dt.microsecond)
dt += timedelta(seconds=1)
return fmt.format(time=dt, millisecond=int(round(dt.microsecond/1000.0)), tz="Z", sep=sep)
def iso_to_datetime(iso_time):
return Datetime.strptime(iso_time, ISO_FORMAT_MICROS)
def format_time_interval(seconds):
"""Format a time interval given in seconds to a readable value, e.g. \"5 minutes, 37 seconds\"."""
periods = (
('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1),)
result=[]
for period_name,period_seconds in periods:
if seconds > period_seconds or period_name=='second':
period_value, seconds = divmod(seconds, period_seconds)
if period_value > 0 or period_name=='second':
if period_value == 1:
result.append("%d %s" % (period_value, period_name))
else:
result.append("%d %ss" % (period_value, period_name))
return ", ".join(result)
def _find_used(activity, predicate):
"""Finds a particular used resource in an activity that matches a predicate."""
for resource in activity['used']:
if predicate(resource):
return resource
return None
def itersubclasses(cls, _seen=None):
"""
http://code.activestate.com/recipes/576949/ (r3)
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None: _seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def normalize_whitespace(s):
"""
    Strips the string and replaces all whitespace sequences and other
non-printable characters with a single space.
"""
assert isinstance(s, six.string_types)
return re.sub(r'[\x00-\x20\s]+', ' ', s.strip())
def normalize_lines(s):
assert isinstance(s, six.string_types)
s2 = re.sub(r'[\t ]*\n[\t ]*', '\n', s.strip())
return re.sub(r'[\t ]+', ' ', s2)
def _synapse_error_msg(ex):
"""
Format a human readable error message
"""
if isinstance(ex, six.string_types):
return ex
return '\n' + ex.__class__.__name__ + ': ' + str(ex) + '\n\n'
def _limit_and_offset(uri, limit=None, offset=None):
"""
Set limit and/or offset query parameters of the given URI.
"""
parts = urlparse(uri)
query = parse_qs(parts.query)
if limit is None:
query.pop('limit', None)
else:
query['limit'] = limit
if offset is None:
query.pop('offset', None)
else:
query['offset'] = offset
## in Python 2, urllib expects encoded byte-strings
if six.PY2:
new_query = {}
for k,v in query.items():
if isinstance(v,list):
v = [unicode(element).encode('utf-8') for element in v]
elif isinstance(v,str):
v = unicode(v).encode('utf-8')
new_query[unicode(k).encode('utf-8')] = v
query = new_query
new_query_string = urlencode(query, doseq=True)
return urlunparse(ParseResult(
scheme=parts.scheme,
netloc=parts.netloc,
path=parts.path,
params=parts.params,
query=new_query_string,
fragment=parts.fragment))
def query_limit_and_offset(query, hard_limit=1000):
"""
Extract limit and offset from the end of a query string.
:returns: A triple containing the query with limit and offset removed, the
limit at most equal to the hard_limit, and the offset which
defaults to 1
"""
# Regex a lower-case string to simplify matching
tempQueryStr = query.lower()
regex = '\A(.*\s)(offset|limit)\s*(\d*\s*)\Z'
# Continue to strip off and save the last limit/offset
match = re.search(regex, tempQueryStr)
options = {}
while match is not None:
options[match.group(2)] = int(match.group(3))
tempQueryStr = match.group(1)
match = re.search(regex, tempQueryStr)
# Get a truncated version of the original query string (not in lower-case)
query = query[:len(tempQueryStr)].strip()
# Continue querying until the entire query has been fetched (or crash out)
limit = min(options.get('limit',hard_limit), hard_limit)
offset = options.get('offset',1)
return query, limit, offset
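# Illustrative example (not part of the original module):
#   query_limit_and_offset("select * from syn123 limit 5000 offset 10")
#       -> ("select * from syn123", 1000, 10)   # the limit is capped at hard_limit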
def _extract_synapse_id_from_query(query):
"""
An unfortunate hack to pull the synapse ID out of a table query of the
form "select column1, column2 from syn12345 where...." needed to build
URLs for table services.
"""
m = re.search(r"from\s+(syn\d+)", query, re.IGNORECASE)
if m:
return m.group(1)
else:
raise ValueError("Couldn't extract synapse ID from query: \"%s\"" % query)
#Derived from https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
def memoize(obj):
cache = obj._memoize_cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
refresh = kwargs.pop('refresh', False)
key = str(args) + str(kwargs)
if refresh or key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
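# Illustrative usage (not part of the original module):
#   @memoize
#   def get_profile(user_id):
#       ...                          # expensive lookup
#   get_profile(1)                   # computed and cached
#   get_profile(1)                   # served from the cache
#   get_profile(1, refresh=True)     # recomputed; the cached value is replaced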
def printTransferProgress(transferred, toBeTransferred, prefix = '', postfix='', isBytes=True, dt=None, previouslyTransferred = 0):
"""Prints a progress bar
:param transferred: a number of items/bytes completed
:param toBeTransferred: total number of items/bytes when completed
:param prefix: String printed before progress bar
    :param postfix: String printed after progress bar
:param isBytes: A boolean indicating whether to convert bytes to kB, MB, GB etc.
:param dt: The time in seconds that has passed since transfer started is used to calculate rate.
:param previouslyTransferred: the number of bytes that were already transferred before this transfer began( e.g. someone ctrl+c'd out of an upload and restarted it later)
"""
if not sys.stdout.isatty():
return
barLength = 20 # Modify this to change the length of the progress bar
status = ''
rate = ''
if dt is not None and dt != 0:
rate = (transferred - previouslyTransferred)/float(dt)
rate = '(%s/s)' % humanizeBytes(rate) if isBytes else rate
if toBeTransferred<0:
defaultToBeTransferred = (barLength*1*MB)
if transferred > defaultToBeTransferred:
progress = float(transferred % defaultToBeTransferred) / defaultToBeTransferred
else:
progress = float(transferred) / defaultToBeTransferred
elif toBeTransferred==0: #There is nothing to be transferred
progress = 1
status = "Done...\n"
else:
progress = float(transferred) / toBeTransferred
if progress >= 1:
progress = 1
status = "Done...\n"
block = int(round(barLength*progress))
nbytes = humanizeBytes(transferred) if isBytes else transferred
if toBeTransferred>0:
outOf = "/%s" % (humanizeBytes(toBeTransferred) if isBytes else toBeTransferred)
percentage = "%4.2f%%"%(progress*100)
else:
outOf = ""
percentage = ""
text = "\r%s [%s]%s %s%s %s %s %s " % (prefix,
"#"*block + "-"*(barLength-block),
percentage,
nbytes, outOf, rate,
postfix, status)
sys.stdout.write(text)
sys.stdout.flush()
def humanizeBytes(bytes):
bytes = float(bytes)
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
for i, unit in enumerate(units):
if bytes<1024:
return '%3.1f%s' %(bytes, units[i])
else:
bytes /= 1024
return 'Oops larger than Exabytes'
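# Illustrative examples (not part of the original module):
#   humanizeBytes(1234)     -> '1.2kB'
#   humanizeBytes(5 * MB)   -> '5.0MB'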
def touch(path, times=None):
"""
Make sure a file exists. Update its access and modified times.
"""
basedir = os.path.dirname(path)
if not os.path.exists(basedir):
try:
os.makedirs(basedir)
except OSError as err:
## alternate processes might be creating these at the same time
if err.errno != errno.EEXIST:
raise
with open(path, 'a'):
os.utime(path, times)
return path
def _is_json(content_type):
"""detect if a content-type is JSON"""
## The value of Content-Type defined here:
## http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7
return content_type.lower().strip().startswith('application/json') if content_type else False
def find_data_file_handle(bundle):
"""Return the fileHandle whose ID matches the dataFileHandleId in an entity bundle"""
for fileHandle in bundle['fileHandles']:
if fileHandle['id'] == bundle['entity']['dataFileHandleId']:
return fileHandle
return None
def unique_filename(path):
"""Returns a unique path by appending (n) for some number n to the end of the filename."""
base, ext = os.path.splitext(path)
counter = 0
while os.path.exists(path):
counter += 1
path = base + ("(%d)" % counter) + ext
return path
@implements_iterator
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
See: http://anandology.com/blog/using-iterators-and-generators/
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.it)
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
See: http://anandology.com/blog/using-iterators-and-generators/
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
def extract_prefix(keys):
"""
Takes a list of strings and extracts a common prefix delimited by a dot,
for example:
>>> extract_prefix(["entity.bang", "entity.bar", "entity.bat"])
entity.
"""
prefixes = set()
for key in keys:
parts = key.split(".")
if len(parts) > 1:
prefixes.add(parts[0])
else:
return ""
if len(prefixes) == 1:
return prefixes.pop() + "."
return ""
def temp_download_filename(destination, file_handle_id):
suffix = "synapse_download_" + (str(file_handle_id) \
if file_handle_id else \
str(uuid.uuid4()))
return os.path.join(destination, suffix) \
if os.path.isdir(destination) else \
destination + '.' + suffix
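# Illustrative examples (not part of the original module, assuming '/downloads' is an existing directory):
#   temp_download_filename('/downloads', 42)           -> '/downloads/synapse_download_42'
#   temp_download_filename('/downloads/data.csv', 42)  -> '/downloads/data.csv.synapse_download_42'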
def _extract_zip_file_to_directory(zip_file, zip_entry_name, target_dir):
"""
Extracts a specified file in a zip to the specified directory
:param zip_file: an opened zip file. e.g. "with zipfile.ZipFile(zipfilepath) as zip_file:"
:param zip_entry_name: the name of the file to be extracted from the zip e.g. folderInsideZipIfAny/fileName.txt
:param target_dir: the directory to which the file will be extracted
:return: full path to the extracted file
"""
file_base_name = os.path.basename(zip_entry_name) # base name of the file
filepath = os.path.join(target_dir, file_base_name) # file path to the cached file to write
# Create the cache directory if it does not exist
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# write the file from the zip into the cache
with open(filepath, 'wb') as cache_file:
cache_file.write(zip_file.read(zip_entry_name))
return filepath
def _is_integer(x):
try:
return float.is_integer(x)
except TypeError:
try:
int(x)
return True
except (ValueError, TypeError):
## anything that's not an integer, for example: empty string, None, 'NaN' or float('Nan')
return False
def topolgical_sort(graph):
"""Given a graph in the form of a dictionary returns a sorted list
Adapted from: http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/
:param graph: a dictionary with values containing lists of keys referencing back into the dictionary
:returns: sorted list of items
"""
graph_unsorted = graph.copy()
graph_sorted = []
# Convert the unsorted graph into a hash table. This gives us
# constant-time lookup for checking if edges are unresolved
# Run until the unsorted graph is empty.
while graph_unsorted:
# Go through each of the node/edges pairs in the unsorted
# graph. If a set of edges doesn't contain any nodes that
# haven't been resolved, that is, that are still in the
# unsorted graph, remove the pair from the unsorted graph,
# and append it to the sorted graph. Note here that by using
        # the items() method for iterating, a copy of the
# unsorted graph is used, allowing us to modify the unsorted
# graph as we move through it. We also keep a flag for
# checking that that graph is acyclic, which is true if any
# nodes are resolved during each pass through the graph. If
# not, we need to bail out as the graph therefore can't be
# sorted.
acyclic = False
for node, edges in list(graph_unsorted.items()):
for edge in edges:
if edge in graph_unsorted:
break
else:
acyclic = True
del graph_unsorted[node]
graph_sorted.append((node, edges))
if not acyclic:
# We've passed through all the unsorted nodes and
# weren't able to resolve any of them, which means there
# are nodes with cyclic edges that will never be resolved,
# so we bail out with an error.
raise RuntimeError("A cyclic dependency occurred. Some files in provenance reference each other circularly.")
return graph_sorted
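# Illustrative example (not part of the original module); dependencies come out first:
#   topolgical_sort({'c': ['a', 'b'], 'a': [], 'b': ['a']})
#       -> [('a', []), ('b', ['a']), ('c', ['a', 'b'])]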
def caller_module_name(current_frame):
"""
:param current_frame: use inspect.currentframe().
:return: the name of the module calling the function, foo(), in which this calling_module() is invoked. Ignores callers that belong in the same module as foo()
"""
current_frame_filename = current_frame.f_code.co_filename #filename in which foo() resides
#go back a frame takes us to the frame calling foo()
caller_frame = current_frame.f_back
caller_filename = caller_frame.f_code.co_filename
# find the first frame that does not have the same filename. this ensures that we don't consider functions within the same module as foo() that use foo() as a helper function
while(caller_filename == current_frame_filename):
caller_frame = caller_frame.f_back
caller_filename = caller_frame.f_code.co_filename
return inspect.getmodulename(caller_filename)
def attempt_import(module_name, fail_message):
try:
return importlib.import_module(module_name)
except ImportError:
sys.stderr.write(
(fail_message +
"To install this library on Mac or Linux distributions:\n"
" (sudo) pip install %s\n\n"
"On Windows, right click the Command Prompt(cmd.exe) and select 'Run as administrator' then:\n"
" pip install %s\n\n"
"\n\n\n" % (module_name, module_name)))
raise
| 32.92865 | 178 | 0.634709 |
f74cf35b76e4ab4dbb98854f098dd2d45783cde0 | 424 | py | Python | libs/interface.py | spidermila/mikrotik_control | 65bf4405be180cf5073cdb129aa345df4ddfac52 | [
"MIT"
] | null | null | null | libs/interface.py | spidermila/mikrotik_control | 65bf4405be180cf5073cdb129aa345df4ddfac52 | [
"MIT"
] | null | null | null | libs/interface.py | spidermila/mikrotik_control | 65bf4405be180cf5073cdb129aa345df4ddfac52 | [
"MIT"
] | 1 | 2022-03-20T02:57:03.000Z | 2022-03-20T02:57:03.000Z | class Interface:
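    """Simple data container for a single interface entry (name, state flags, comment)."""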
def __init__(
self,
number: int,
name: str,
disabled: bool,
running: bool,
slave: bool,
dynamic: bool,
comment: str,
) -> None:
self.number = number
self.name = name
self.disabled = disabled
self.running = running
self.slave = slave
self.dynamic = dynamic
self.comment = comment
| 22.315789 | 32 | 0.514151 |
f74d01e15784d436a563807618590b57448c0f48 | 3,730 | py | Python | xos/core/dashboard/views/interactions.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | null | null | null | xos/core/dashboard/views/interactions.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | 5 | 2020-06-05T17:47:15.000Z | 2021-09-23T23:21:27.000Z | xos/core/dashboard/views/interactions.py | pan2za/xos | c2a4da2ccaa12360b2718be303b247866aefdfe6 | [
"Apache-2.0"
] | null | null | null | from view_common import *
class DashboardSliceInteractions(View):
def get(self, request, name="users", **kwargs):
colors = ["#005586", "#6ebe49", "orange", "#707170", "#00c4b3", "#077767", "dodgerblue", "#a79b94", "#c4e76a", "red"]
groups = []
matrix = []
slices = list(Slice.objects.all())
ids_by_slice = self.build_id_list(slices, name)
slices = [x for x in slices if (len(ids_by_slice[x])>0)]
for i,slice in enumerate(slices):
groups.append({"name": slice.name, "color": colors[i%len(colors)]})
row=self.buildMatrix(slice, slices, name, ids_by_slice)
matrix.append(row)
result = {"groups": groups, "matrix": matrix}
if name=="users":
result["title"] = "Slice interactions by user privilege"
result["objectName"] = "users"
elif name=="networks":
result["title"] = "Slice interactions by network membership"
result["objectName"] = "networks"
elif name=="sites":
result["title"] = "Slice interactions by site ownership"
result["objectName"] = "sites"
elif name=="instance_sites":
result["title"] = "Slice interactions by instance sites"
result["objectName"] = "sites"
elif name=="instance_nodes":
result["title"] = "Slice interactions by instance nodes"
result["objectName"] = "nodes"
return HttpResponse(json.dumps(result), content_type='application/javascript')
def build_id_list(self, slices, name):
ids_by_slice = {}
for slice in slices:
# build up a list of object ids that are used by each slice
ids_by_slice[slice] = self.getIds(slice, name)
return ids_by_slice
def buildMatrix(self, slice, slices, name, ids_by_slice):
not_only_my_ids = []
# build up a list of object ids that are used by other slices
for otherSlice in ids_by_slice.keys():
if (slice != otherSlice):
for id in ids_by_slice[otherSlice]:
if not id in not_only_my_ids:
not_only_my_ids.append(id)
# build up a list of ids that are used only by the slice, and not
# shared with any other slice
only_my_ids = []
for id in ids_by_slice[slice]:
if id not in not_only_my_ids:
only_my_ids.append(id)
row = []
for otherSlice in ids_by_slice.keys():
if (otherSlice == slice):
row.append(len(only_my_ids))
else:
row.append(self.inCommonIds(ids_by_slice[slice], ids_by_slice[otherSlice]))
return row
def getIds(self, slice, name):
ids=[]
if name=="users":
for sp in slice.slice_privileges.all():
if sp.user.id not in ids:
ids.append(sp.user.id)
elif name=="networks":
for sp in slice.networkslices.all():
if sp.network.id not in ids:
ids.append(sp.network.id)
elif name=="sites":
ids = [slice.site.id]
elif name=="instance_sites":
for sp in slice.instances.all():
if sp.node.site.id not in ids:
ids.append(sp.node.site.id)
elif name=="instance_nodes":
for sp in slice.instances.all():
if sp.node.id not in ids:
ids.append(sp.node.id)
return ids
def inCommonIds(self, ids1, ids2):
count = 0
for id in ids1:
if id in ids2:
count+=1
return count
| 35.865385 | 125 | 0.551743 |
f74d025fc102159173e84e6023fcc9058865349c | 268 | py | Python | utils/util.py | daili0015/ModelFeast | 0689ced4d0f37be438d3a91908e5e4cc5b7d54b8 | [
"MIT"
] | 247 | 2019-03-05T07:12:29.000Z | 2022-03-29T01:51:17.000Z | utils/util.py | jungerschwarz/ModelFeast | 03afca0b129532135910ee2ac72a3b85be795289 | [
"MIT"
] | 8 | 2019-05-21T03:05:27.000Z | 2021-12-09T03:22:51.000Z | utils/util.py | jungerschwarz/ModelFeast | 03afca0b129532135910ee2ac72a3b85be795289 | [
"MIT"
] | 47 | 2019-03-05T07:14:13.000Z | 2021-11-11T01:04:28.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def get_instance(module, name, config, *args):
return getattr(module, config[name]['type'])(*args, **config[name]['args'])
| 20.615385 | 79 | 0.63806 |
f74d2ccd36714d8c8feb1fa3fdbdedb0c184a37b | 429 | py | Python | hstphot/read_ds9region.py | bkornpob/hstphot | eafaebc382450061690c5795ce350336801d8037 | [
"MIT"
] | null | null | null | hstphot/read_ds9region.py | bkornpob/hstphot | eafaebc382450061690c5795ce350336801d8037 | [
"MIT"
] | null | null | null | hstphot/read_ds9region.py | bkornpob/hstphot | eafaebc382450061690c5795ce350336801d8037 | [
"MIT"
] | null | null | null | import numpy as np
def read_ds9region(ds9regfile):
"""
Assume ds9regfile in the format as ds9, and coordinate system as image
"""
out = {}
f = open(ds9regfile,'r')
for i,ii in enumerate(f.readlines()):
if i < 3:
continue
x,y,_ = np.array(ii.split('(')[1].split(')')[0].split(',')).astype(float)
z = ii.split('{')[1].split('}')[0]
out[z] = (x,y)
return out
| 26.8125 | 81 | 0.531469 |
f74d618ffd0980a092c540577b8989c31c36d997 | 20,645 | py | Python | zerver/lib/streams.py | networksneaker/zulip | fae365f8c7e2d6ee041024a22b6ba5a64cbffe4e | [
"Apache-2.0"
] | 6 | 2019-05-09T20:43:20.000Z | 2022-03-29T05:53:50.000Z | zerver/lib/streams.py | networksneaker/zulip | fae365f8c7e2d6ee041024a22b6ba5a64cbffe4e | [
"Apache-2.0"
] | 2 | 2016-10-18T04:01:56.000Z | 2016-10-20T18:19:09.000Z | zerver/lib/streams.py | networksneaker/zulip | fae365f8c7e2d6ee041024a22b6ba5a64cbffe4e | [
"Apache-2.0"
] | 7 | 2016-08-10T02:24:32.000Z | 2022-03-28T15:14:18.000Z | from typing import Any, Iterable, List, Mapping, Optional, Set, Tuple, Union
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from zerver.lib.bugdown import convert as bugdown_convert
from zerver.lib.request import JsonableError
from zerver.models import (
DefaultStreamGroup,
Realm,
Recipient,
Stream,
Subscription,
UserProfile,
active_non_guest_user_ids,
bulk_get_streams,
get_realm_stream,
get_stream,
get_stream_by_id_in_realm,
is_cross_realm_bot_email,
)
from zerver.tornado.event_queue import send_event
def get_default_value_for_history_public_to_subscribers(
realm: Realm,
invite_only: bool,
history_public_to_subscribers: Optional[bool],
) -> bool:
if invite_only:
if history_public_to_subscribers is None:
# A private stream's history is non-public by default
history_public_to_subscribers = False
else:
# If we later decide to support public streams without
# history, we can remove this code path.
history_public_to_subscribers = True
if realm.is_zephyr_mirror_realm:
# In the Zephyr mirroring model, history is unconditionally
# not public to subscribers, even for public streams.
history_public_to_subscribers = False
return history_public_to_subscribers
def render_stream_description(text: str) -> str:
return bugdown_convert(text, no_previews=True)
def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
event = dict(type="stream", op="create",
streams=[stream.to_dict()])
send_event(stream.realm, event, user_ids)
def create_stream_if_needed(realm: Realm,
stream_name: str,
*,
invite_only: bool=False,
stream_post_policy: int=Stream.STREAM_POST_POLICY_EVERYONE,
history_public_to_subscribers: Optional[bool]=None,
stream_description: str="",
message_retention_days: Optional[int]=None) -> Tuple[Stream, bool]:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
realm, invite_only, history_public_to_subscribers)
(stream, created) = Stream.objects.get_or_create(
realm=realm,
name__iexact=stream_name,
defaults = dict(
name=stream_name,
description=stream_description,
invite_only=invite_only,
stream_post_policy=stream_post_policy,
history_public_to_subscribers=history_public_to_subscribers,
is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
message_retention_days=message_retention_days,
),
)
if created:
recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
stream.recipient = recipient
stream.rendered_description = render_stream_description(stream_description)
stream.save(update_fields=["recipient", "rendered_description"])
if stream.is_public():
send_stream_creation_event(stream, active_non_guest_user_ids(stream.realm_id))
else:
realm_admin_ids = [user.id for user in
stream.realm.get_admin_users_and_bots()]
send_stream_creation_event(stream, realm_admin_ids)
return stream, created
def create_streams_if_needed(realm: Realm,
stream_dicts: List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]:
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams: List[Stream] = []
existing_streams: List[Stream] = []
for stream_dict in stream_dicts:
stream, created = create_stream_if_needed(
realm,
stream_dict["name"],
invite_only=stream_dict.get("invite_only", False),
stream_post_policy=stream_dict.get("stream_post_policy", Stream.STREAM_POST_POLICY_EVERYONE),
history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
stream_description=stream_dict.get("description", ""),
message_retention_days=stream_dict.get("message_retention_days", None)
)
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams
def check_stream_name(stream_name: str) -> None:
if stream_name.strip() == "":
raise JsonableError(_("Invalid stream name '{}'").format(stream_name))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name too long (limit: {} characters).").format(Stream.MAX_NAME_LENGTH))
for i in stream_name:
if ord(i) == 0:
raise JsonableError(_("Stream name '{}' contains NULL (0x00) characters.").format(stream_name))
def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
return Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__type_id=stream_id).exists()
def access_stream_for_send_message(sender: UserProfile,
stream: Stream,
forwarder_user_profile: Optional[UserProfile]) -> None:
# Our caller is responsible for making sure that `stream` actually
# matches the realm of the sender.
# Organization admins can send to any stream, irrespective of the stream_post_policy value.
if sender.is_realm_admin or is_cross_realm_bot_email(sender.delivery_email):
pass
elif sender.is_bot and (sender.bot_owner is not None and
sender.bot_owner.is_realm_admin):
pass
elif stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS:
raise JsonableError(_("Only organization administrators can send to this stream."))
elif stream.stream_post_policy == Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS:
if sender.is_bot and (sender.bot_owner is not None and
sender.bot_owner.is_new_member):
raise JsonableError(_("New members cannot send to this stream."))
elif sender.is_new_member:
raise JsonableError(_("New members cannot send to this stream."))
if not (stream.invite_only or sender.is_guest):
# This is a public stream and sender is not a guest user
return
if subscribed_to_stream(sender, stream.id):
        # It is private, but you are subscribed
return
if sender.is_api_super_user:
return
if (forwarder_user_profile is not None and forwarder_user_profile.is_api_super_user):
return
if sender.is_bot and (sender.bot_owner is not None and
subscribed_to_stream(sender.bot_owner, stream.id)):
# Bots can send to any stream their owner can.
return
if sender.delivery_email == settings.WELCOME_BOT:
# The welcome bot welcomes folks to the stream.
return
if sender.delivery_email == settings.NOTIFICATION_BOT:
return
# All other cases are an error.
raise JsonableError(_("Not authorized to send to stream '{}'").format(stream.name))
def check_for_exactly_one_stream_arg(stream_id: Optional[int], stream: Optional[str]) -> None:
if stream_id is None and stream is None:
raise JsonableError(_("Please supply 'stream'."))
if stream_id is not None and stream is not None:
raise JsonableError(_("Please choose one: 'stream' or 'stream_id'."))
def access_stream_for_delete_or_update(user_profile: UserProfile, stream_id: int) -> Stream:
# We should only ever use this for realm admins, who are allowed
# to delete or update all streams on their realm, even private streams
# to which they are not subscribed. We do an assert here, because
# all callers should have the require_realm_admin decorator.
assert(user_profile.is_realm_admin)
error = _("Invalid stream id")
try:
stream = Stream.objects.get(id=stream_id)
except Stream.DoesNotExist:
raise JsonableError(error)
if stream.realm_id != user_profile.realm_id:
raise JsonableError(error)
return stream
# Only set allow_realm_admin flag to True when you want to allow realm admin to
# access unsubscribed private stream content.
def access_stream_common(user_profile: UserProfile, stream: Stream,
error: str,
require_active: bool=True,
allow_realm_admin: bool=False) -> Tuple[Recipient, Optional[Subscription]]:
"""Common function for backend code where the target use attempts to
access the target stream, returning all the data fetched along the
way. If that user does not have permission to access that stream,
we throw an exception. A design goal is that the error message is
the same for streams you can't access and streams that don't exist."""
# First, we don't allow any access to streams in other realms.
if stream.realm_id != user_profile.realm_id:
raise JsonableError(error)
recipient = stream.recipient
try:
sub = Subscription.objects.get(user_profile=user_profile,
recipient=recipient,
active=require_active)
except Subscription.DoesNotExist:
sub = None
# If the stream is in your realm and public, you can access it.
if stream.is_public() and not user_profile.is_guest:
return (recipient, sub)
# Or if you are subscribed to the stream, you can access it.
if sub is not None:
return (recipient, sub)
# For some specific callers (e.g. getting list of subscribers,
# removing other users from a stream, and updating stream name and
# description), we allow realm admins to access stream even if
# they are not subscribed to a private stream.
if user_profile.is_realm_admin and allow_realm_admin:
return (recipient, sub)
# Otherwise it is a private stream and you're not on it, so throw
# an error.
raise JsonableError(error)
def access_stream_by_id(user_profile: UserProfile,
stream_id: int,
require_active: bool=True,
allow_realm_admin: bool=False) -> Tuple[Stream, Recipient, Optional[Subscription]]:
stream = get_stream_by_id(stream_id)
error = _("Invalid stream id")
(recipient, sub) = access_stream_common(user_profile, stream, error,
require_active=require_active,
allow_realm_admin=allow_realm_admin)
return (stream, recipient, sub)
def get_public_streams_queryset(realm: Realm) -> 'QuerySet[Stream]':
return Stream.objects.filter(realm=realm, invite_only=False,
history_public_to_subscribers=True)
def get_stream_by_id(stream_id: int) -> Stream:
error = _("Invalid stream id")
try:
stream = Stream.objects.get(id=stream_id)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def check_stream_name_available(realm: Realm, name: str) -> None:
check_stream_name(name)
try:
get_stream(name, realm)
raise JsonableError(_("Stream name '{}' is already taken.").format(name))
except Stream.DoesNotExist:
pass
def access_stream_by_name(user_profile: UserProfile,
stream_name: str,
allow_realm_admin: bool=False) -> Tuple[Stream, Recipient, Optional[Subscription]]:
error = _("Invalid stream name '{}'").format(stream_name)
try:
stream = get_realm_stream(stream_name, user_profile.realm_id)
except Stream.DoesNotExist:
raise JsonableError(error)
(recipient, sub) = access_stream_common(user_profile, stream, error,
allow_realm_admin=allow_realm_admin)
return (stream, recipient, sub)
def access_stream_for_unmute_topic_by_name(user_profile: UserProfile,
stream_name: str,
error: str) -> Stream:
"""
It may seem a little silly to have this helper function for unmuting
topics, but it gets around a linter warning, and it helps to be able
to review all security-related stuff in one place.
Our policy for accessing streams when you unmute a topic is that you
don't necessarily need to have an active subscription or even "legal"
access to the stream. Instead, we just verify the stream_id has been
muted in the past (not here, but in the caller).
Long term, we'll probably have folks just pass us in the id of the
MutedTopic row to unmute topics.
"""
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def access_stream_for_unmute_topic_by_id(user_profile: UserProfile,
stream_id: int,
error: str) -> Stream:
try:
stream = Stream.objects.get(id=stream_id, realm_id=user_profile.realm_id)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def can_access_stream_history(user_profile: UserProfile, stream: Stream) -> bool:
"""Determine whether the provided user is allowed to access the
    history of the target stream, which is passed in as a Stream object.
This is used by the caller to determine whether this user can get
historical messages before they joined for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
Note that this function should only be used in contexts where
access_stream is being called elsewhere to confirm that the user
can actually see this stream.
"""
if stream.is_history_realm_public() and not user_profile.is_guest:
return True
if stream.is_history_public_to_subscribers():
# In this case, we check if the user is subscribed.
error = _("Invalid stream name '{}'").format(stream.name)
try:
(recipient, sub) = access_stream_common(user_profile, stream, error)
except JsonableError:
return False
return True
return False
def can_access_stream_history_by_name(user_profile: UserProfile, stream_name: str) -> bool:
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
return False
return can_access_stream_history(user_profile, stream)
def can_access_stream_history_by_id(user_profile: UserProfile, stream_id: int) -> bool:
try:
stream = get_stream_by_id_in_realm(stream_id, user_profile.realm)
except Stream.DoesNotExist:
return False
return can_access_stream_history(user_profile, stream)
def filter_stream_authorization(user_profile: UserProfile,
streams: Iterable[Stream]) -> Tuple[List[Stream], List[Stream]]:
streams_subscribed: Set[int] = set()
recipient_ids = [stream.recipient_id for stream in streams]
subs = Subscription.objects.filter(user_profile=user_profile,
recipient_id__in=recipient_ids,
active=True)
for sub in subs:
streams_subscribed.add(sub.recipient.type_id)
unauthorized_streams: List[Stream] = []
for stream in streams:
# The user is authorized for their own streams
if stream.id in streams_subscribed:
continue
# Users are not authorized for invite_only streams, and guest
# users are not authorized for any streams
if stream.invite_only or user_profile.is_guest:
unauthorized_streams.append(stream)
authorized_streams = [stream for stream in streams if
stream.id not in {stream.id for stream in unauthorized_streams}]
return authorized_streams, unauthorized_streams
def list_to_streams(streams_raw: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
autocreate: bool=False) -> Tuple[List[Stream], List[Stream]]:
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name using check_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = {stream_dict["name"] for stream_dict in streams_raw}
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
check_stream_name(stream_name)
existing_streams: List[Stream] = []
missing_stream_dicts: List[Mapping[str, Any]] = []
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
message_retention_days_not_none = False
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
if stream_dict.get('message_retention_days', None) is not None:
message_retention_days_not_none = True
missing_stream_dicts.append(stream_dict)
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams: List[Stream] = []
else:
# autocreate=True path starts here
if not user_profile.can_create_streams():
raise JsonableError(_('User cannot create streams.'))
elif not autocreate:
raise JsonableError(_("Stream(s) ({}) do not exist").format(
", ".join(stream_dict["name"] for stream_dict in missing_stream_dicts),
))
elif message_retention_days_not_none:
if not user_profile.is_realm_owner:
                raise JsonableError(_('User cannot create a stream with these settings.'))
user_profile.realm.ensure_not_on_limited_plan()
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(realm=user_profile.realm,
stream_dicts=missing_stream_dicts)
existing_streams += dup_streams
return existing_streams, created_streams
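# Illustrative usage sketch (editor addition; the stream names are hypothetical).
def _example_list_to_streams(user_profile: UserProfile) -> None:
    streams_raw = [{"name": "design"}, {"name": "ops"}]
    existing, created = list_to_streams(streams_raw, user_profile, autocreate=True)
    # `existing` holds streams already present in the realm; `created` holds
    # the ones that were just created (empty when everything already existed).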
def access_default_stream_group_by_id(realm: Realm, group_id: int) -> DefaultStreamGroup:
try:
return DefaultStreamGroup.objects.get(realm=realm, id=group_id)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_("Default stream group with id '{}' does not exist.").format(group_id))
def get_stream_by_narrow_operand_access_unchecked(operand: Union[str, int], realm: Realm) -> Stream:
"""This is required over access_stream_* in certain cases where
we need the stream data only to prepare a response that user can access
and not send it out to unauthorized recipients.
"""
if isinstance(operand, str):
return get_stream(operand, realm)
return get_stream_by_id_in_realm(operand, realm)
| 42.83195 | 109 | 0.675902 |
f74d9307c18f268eb92583ff062d93033f2347d6 | 1,333 | py | Python | python_solutions/fizz_buzz/codeeval_fizzbuzz.py | joelstanner/codeeval | ef0591fabcad39d45f10287d7a1330f342ab96e0 | ["MIT"] | null | null | null | python_solutions/fizz_buzz/codeeval_fizzbuzz.py | joelstanner/codeeval | ef0591fabcad39d45f10287d7a1330f342ab96e0 | ["MIT"] | null | null | null | python_solutions/fizz_buzz/codeeval_fizzbuzz.py | joelstanner/codeeval | ef0591fabcad39d45f10287d7a1330f342ab96e0 | ["MIT"] | null | null | null |
"""Accept a text file and do the fizzbuzz upon it.
Your program should accept a file as its first argument.
The file contains multiple separated lines; each line contains
3 numbers that are space delimited. The first number is the first
divider (X), the second number is the second divider (Y), and the
third number is how far you should count (N). You may assume that
the input file is formatted correctly and the numbers are valid
positive integers.
"""
from __future__ import print_function
import sys
INPUT_FILE = sys.argv[1]
def parse_input(input_file):
values = []
with open(input_file, mode='r') as f:
for line in f:
#convert the strings to ints
nums = [int(s) for s in line.split()]
values.append(nums)
return values
def fizzbuzz(X, Y, N):
for val in range(1, N + 1):
if val % X == 0 and val % Y == 0:
print('FB', end='')
elif val % X == 0:
print('F', end='')
elif val % Y == 0:
print('B', end='')
else:
print(val, end='')
if val == N:
print()
elif val < N:
print(' ', end='')
def process():
values = parse_input(INPUT_FILE)
for val in values:
X, Y, Z = val[0], val[1], val[2]
fizzbuzz(X, Y, Z)
process()
| 23.385965 | 65 | 0.580645 |
f74dbc0edbbc975785b88e27d1296ba416992478 | 287 | py | Python | awswrangler/__metadata__.py | Kyruski/aws-data-wrangler | 2c240e398bef06d00a69ac690c23ffb50666df69 | ["Apache-2.0"] | null | null | null | awswrangler/__metadata__.py | Kyruski/aws-data-wrangler | 2c240e398bef06d00a69ac690c23ffb50666df69 | ["Apache-2.0"] | 56 | 2021-06-04T13:41:52.000Z | 2022-03-28T08:53:21.000Z | awswrangler/__metadata__.py | Kyruski/aws-data-wrangler | 2c240e398bef06d00a69ac690c23ffb50666df69 | ["Apache-2.0"] | 1 | 2022-02-06T02:04:32.000Z | 2022-02-06T02:04:32.000Z |
"""Metadata Module.
Source repository: https://github.com/awslabs/aws-data-wrangler
Documentation: https://aws-data-wrangler.readthedocs.io/
"""
__title__: str = "awswrangler"
__description__: str = "Pandas on AWS."
__version__: str = "2.12.1"
__license__: str = "Apache License 2.0"
| 23.916667 | 63 | 0.735192 |
f74dd43f7f25aee6abb1ef96f5623bb0c8999057 | 2,908 | py | Python | influxdb_client/domain/flux_response.py | kelseiv/influxdb-client-python | 9a0d2d659157cca96f6a04818fdeb215d699bdd7 | ["MIT"] | 1 | 2021-06-06T10:39:47.000Z | 2021-06-06T10:39:47.000Z | influxdb_client/domain/flux_response.py | kelseiv/influxdb-client-python | 9a0d2d659157cca96f6a04818fdeb215d699bdd7 | ["MIT"] | null | null | null | influxdb_client/domain/flux_response.py | kelseiv/influxdb-client-python | 9a0d2d659157cca96f6a04818fdeb215d699bdd7 | ["MIT"] | null | null | null |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class FluxResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'flux': 'str'
}
attribute_map = {
'flux': 'flux'
}
def __init__(self, flux=None): # noqa: E501
"""FluxResponse - a model defined in OpenAPI""" # noqa: E501
self._flux = None
self.discriminator = None
if flux is not None:
self.flux = flux
@property
def flux(self):
"""Gets the flux of this FluxResponse. # noqa: E501
:return: The flux of this FluxResponse. # noqa: E501
:rtype: str
"""
return self._flux
@flux.setter
def flux(self, flux):
"""Sets the flux of this FluxResponse.
:param flux: The flux of this FluxResponse. # noqa: E501
:type: str
"""
self._flux = flux
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FluxResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.734513 | 124 | 0.547455 |
f74de0e388005ba39e43708024aba7ca7ac8c7ff | 10,202 | py | Python | lreid/models/LwFnet.py | TPCD/LifelongReID | cb33f9c29fe398e7546db345fab1c338dda8252f | ["MIT"] | 63 | 2021-03-20T15:33:11.000Z | 2022-03-30T03:04:14.000Z | lreid/models/LwFnet.py | TPCD/LifelongReID | cb33f9c29fe398e7546db345fab1c338dda8252f | ["MIT"] | 5 | 2021-03-23T08:04:21.000Z | 2022-03-10T02:28:43.000Z | lreid/models/LwFnet.py | TPCD/LifelongReID | cb33f9c29fe398e7546db345fab1c338dda8252f | ["MIT"] | 10 | 2021-04-30T11:14:10.000Z | 2022-03-18T16:44:55.000Z |
import torch.nn as nn
import torchvision
import copy
import torch
import numpy as np
from .bnneck import BNClassifier, Classifier, Classifier_without_bias
from torch.autograd import Variable
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias:
nn.init.constant_(m.bias, 0.0)
class GlobalPoolFlat(nn.Module):
def __init__(self, pool_mode='avg'):
super(GlobalPoolFlat, self).__init__()
if pool_mode == 'avg':
self.pool = nn.AdaptiveAvgPool2d(1)
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, x):
x = self.pool(x)
if len(x.size()) == 4:
n, c = x.size(0), x.size(1)
else:
assert len(x.size()) == 4
flatted = x.view(n, -1)
assert flatted.size(1) == c
return flatted
class LwFNet(nn.Module):
def __init__(self, class_num_list, pretrained=True):
super(LwFNet, self).__init__()
self.class_num_list = class_num_list
# backbone and optimize its architecture
resnet = torchvision.models.resnet50(pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1, 1)
resnet.layer4[0].downsample[0].stride = (1, 1)
self.backbone = nn.Sequential(
copy.deepcopy(resnet.conv1),
copy.deepcopy(resnet.bn1),
# copy.deepcopy(resnet.relu), # no relu
copy.deepcopy(resnet.maxpool),
copy.deepcopy(resnet.layer1),
copy.deepcopy(resnet.layer2),
copy.deepcopy(resnet.layer3[0])) # conv4_1
# cnn backbone
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_conv5 = resnet.layer4
self.feature_dim = resnet.fc.in_features
self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4),
copy.deepcopy(res_conv5),
GlobalPoolFlat(pool_mode='avg'),
)
del resnet
# classifier
self.classifier_dict = nn.ModuleDict()
for step, num in enumerate(self.class_num_list):
self.classifier_dict[f'step:{step}'] = BNClassifier(self.feature_dim, num)
def forward(self, x, current_step=0):
if isinstance(current_step, list):
feature_maps = self.backbone(x)
cls_score_list = []
features = self.encoder_feature(feature_maps)
for c_s in current_step:
bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features)
cls_score_list.append(cls_score)
if self.training:
# cls_score = torch.cat(cls_score_list, dim=1)
return features, cls_score_list, feature_maps
else:
return bned_features, feature_maps
else:
feature_maps = self.backbone(x)
features = self.encoder_feature(feature_maps)
bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features)
if self.training:
return features, cls_score, feature_maps
else:
return bned_features, feature_maps
def classify_latent_codes(self, latent_codes, current_step):
if isinstance(current_step, list):
cls_score_list = []
for c_s in current_step:
bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](latent_codes)
cls_score_list.append(cls_score)
if self.training:
# cls_score = torch.cat(cls_score_list, dim=1)
return None, cls_score_list, None
else:
return bned_features, None
else:
bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes)
if self.training:
return None, cls_score, None
else:
return bned_features, None
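# Illustrative usage sketch (editor addition; the batch size, image size and
# per-step class counts are hypothetical).
def _example_lwfnet_forward():
    net = LwFNet(class_num_list=[751, 702], pretrained=False)
    images = torch.randn(4, 3, 256, 128)
    net.train()
    features, cls_score, feature_maps = net(images, current_step=0)
    net.eval()
    bned_features, feature_maps = net(images, current_step=0)
    # Passing current_step=[0, 1] instead yields a list with one score tensor
    # per classifier head during training.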
class LwFNet_without_bn(nn.Module):
def __init__(self, class_num_list, pretrained=True):
super(LwFNet_without_bn, self).__init__()
self.class_num_list = class_num_list
# backbone and optimize its architecture
resnet = torchvision.models.resnet50(pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1, 1)
resnet.layer4[0].downsample[0].stride = (1, 1)
self.backbone = nn.Sequential(
copy.deepcopy(resnet.conv1),
copy.deepcopy(resnet.bn1),
# copy.deepcopy(resnet.relu), # no relu
copy.deepcopy(resnet.maxpool),
copy.deepcopy(resnet.layer1),
copy.deepcopy(resnet.layer2),
copy.deepcopy(resnet.layer3[0])) # conv4_1
# cnn backbone
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_conv5 = resnet.layer4
feature_dim = resnet.fc.in_features
self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4),
copy.deepcopy(res_conv5),
GlobalPoolFlat(pool_mode='avg'),
)
del resnet
# classifier
self.classifier_dict = nn.ModuleDict()
for step, num in enumerate(self.class_num_list):
self.classifier_dict[f'step:{step}'] = Classifier(feature_dim, num)
def forward(self, x, current_step):
if isinstance(current_step, list):
feature_maps = self.backbone(x)
cls_score_list = []
features = self.encoder_feature(feature_maps)
for c_s in current_step:
bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features)
cls_score_list.append(cls_score)
if self.training:
# cls_score = torch.cat(cls_score_list, dim=1)
return features, cls_score_list, feature_maps
else:
return bned_features, feature_maps
else:
feature_maps = self.backbone(x)
features = self.encoder_feature(feature_maps)
bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features)
if self.training:
return features, cls_score, feature_maps
else:
return bned_features, feature_maps
def classify_featuremaps(self, featuremaps):
features = self.encoder_feature(featuremaps)
bned_features, cls_score = self.classifier(features)
return cls_score
def classify_latent_codes(self, latent_codes, current_step):
bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes)
return cls_score
class LwFNet_without_bn_bias(nn.Module):
def __init__(self, class_num_list, pretrained=True):
super(LwFNet_without_bn_bias, self).__init__()
self.class_num_list = class_num_list
# backbone and optimize its architecture
resnet = torchvision.models.resnet50(pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1, 1)
resnet.layer4[0].downsample[0].stride = (1, 1)
self.backbone = nn.Sequential(
copy.deepcopy(resnet.conv1),
copy.deepcopy(resnet.bn1),
# copy.deepcopy(resnet.relu), # no relu
copy.deepcopy(resnet.maxpool),
copy.deepcopy(resnet.layer1),
copy.deepcopy(resnet.layer2),
copy.deepcopy(resnet.layer3[0])) # conv4_1
# cnn backbone
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_conv5 = resnet.layer4
feature_dim = resnet.fc.in_features
self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4),
copy.deepcopy(res_conv5),
GlobalPoolFlat(pool_mode='avg'),
)
del resnet
# classifier
self.classifier_dict = nn.ModuleDict()
for step, num in enumerate(self.class_num_list):
self.classifier_dict[f'step:{step}'] = Classifier_without_bias(feature_dim, num)
def forward(self, x, current_step):
if isinstance(current_step, list):
feature_maps = self.backbone(x)
cls_score_list = []
features = self.encoder_feature(feature_maps)
for c_s in current_step:
bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features)
cls_score_list.append(cls_score)
if self.training:
# cls_score = torch.cat(cls_score_list, dim=1)
return features, cls_score_list, feature_maps
else:
return bned_features, feature_maps
else:
feature_maps = self.backbone(x)
features = self.encoder_feature(feature_maps)
bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features)
if self.training:
return features, cls_score, feature_maps
else:
return bned_features, feature_maps
def classify_featuremaps(self, featuremaps):
features = self.encoder_feature(featuremaps)
bned_features, cls_score = self.classifier(features)
return cls_score
def classify_latent_codes(self, latent_codes, current_step):
bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes)
return cls_score
| 39.088123 | 97 | 0.596746 |
f74e0200adbfb470ed10ccb5f8fb109eef363145 | 2,430 | py | Python | posthog/test/test_team.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | ["MIT"] | 1 | 2021-04-09T09:13:23.000Z | 2021-04-09T09:13:23.000Z | posthog/test/test_team.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | ["MIT"] | 1 | 2021-10-13T10:05:26.000Z | 2021-10-13T10:05:26.000Z | posthog/test/test_team.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | ["MIT"] | 1 | 2021-09-16T18:18:07.000Z | 2021-09-16T18:18:07.000Z |
import random
from unittest import mock
from django.conf import settings
from posthog.models import EventDefinition, Organization, PluginConfig, PropertyDefinition, Team, User
from posthog.plugins.test.mock import mocked_plugin_requests_get
from .base import BaseTest
class TestTeam(BaseTest):
def test_team_has_expected_defaults(self):
team: Team = Team.objects.create(name="New Team", organization=self.organization)
self.assertEqual(team.timezone, "UTC")
self.assertEqual(team.data_attributes, ["data-attr"])
def test_create_team_with_test_account_filters(self):
team = Team.objects.create_with_data(organization=self.organization)
self.assertEqual(
team.test_account_filters,
[
{"key": "email", "value": "@posthog.com", "operator": "not_icontains", "type": "person"},
{
"key": "$host",
"operator": "is_not",
"value": ["localhost:8000", "localhost:5000", "127.0.0.1:8000", "127.0.0.1:3000", "localhost:3000"],
},
],
)
# test generic emails
user = User.objects.create(email="test@gmail.com")
organization = Organization.objects.create()
organization.members.set([user])
team = Team.objects.create_with_data(organization=organization)
self.assertEqual(
team.test_account_filters,
[
{
"key": "$host",
"operator": "is_not",
"value": ["localhost:8000", "localhost:5000", "127.0.0.1:8000", "127.0.0.1:3000", "localhost:3000"],
},
],
)
@mock.patch("requests.get", side_effect=mocked_plugin_requests_get)
def test_preinstalled_are_autoenabled(self, mock_get):
with self.settings(
MULTI_TENANCY=False, PLUGINS_PREINSTALLED_URLS=["https://github.com/PostHog/helloworldplugin/"]
):
_, _, new_team = Organization.objects.bootstrap(
self.user, plugins_access_level=Organization.PluginsAccessLevel.INSTALL
)
self.assertEqual(PluginConfig.objects.filter(team=new_team, enabled=True).count(), 1)
self.assertEqual(PluginConfig.objects.filter(team=new_team, enabled=True).get().plugin.name, "helloworldplugin")
self.assertEqual(mock_get.call_count, 2)
| 40.5 | 120 | 0.620165 |
f74e08e4c8d1d1fe496413d1edcd2c47c6720d19 | 1,230 | py | Python | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter | bc458eb1a71cf12c5a7c45223b56ab233819bdbd | ["Apache-2.0", "MIT"] | 9 | 2021-12-07T07:52:28.000Z | 2022-03-23T10:26:25.000Z | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter | bc458eb1a71cf12c5a7c45223b56ab233819bdbd | ["Apache-2.0", "MIT"] | 71 | 2021-12-03T09:40:56.000Z | 2022-03-29T21:47:38.000Z | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter | bc458eb1a71cf12c5a7c45223b56ab233819bdbd | ["Apache-2.0", "MIT"] | 3 | 2021-12-07T07:52:32.000Z | 2022-02-17T09:55:00.000Z |
""" Module containing DocumentPublisher class """
from ..common.common import SectionHandler
# pylint: disable=too-few-public-methods
class DocumentPublisher(SectionHandler):
""" Responsible for converting the DocumentPublisher section:
- /cvrf:cvrfdoc/cvrf:DocumentPublisher
"""
type_category_mapping = {
'Vendor': 'vendor',
'Coordinator': 'coordinator',
'User': 'user',
'Discoverer': 'discoverer',
'Other': 'other',
}
def __init__(self, config):
super().__init__()
self.name = config.get('publisher_name')
self.namespace = config.get('publisher_namespace')
def _process_mandatory_elements(self, root_element):
self.csaf['name'] = self.name
self.csaf['namespace'] = self.namespace
self.csaf['category'] = self.type_category_mapping[root_element.attrib['Type']]
def _process_optional_elements(self, root_element):
# optional values
if hasattr(root_element, 'ContactDetails'):
self.csaf['contact_details'] = root_element.ContactDetails.text
if hasattr(root_element, 'IssuingAuthority'):
self.csaf['issuing_authority'] = root_element.IssuingAuthority.text
| 36.176471 | 87 | 0.669919 |
f74e413462485792cfeac1ed30e015ec7d596664 | 2,273 | py | Python | arjuna-samples/arjex/test/pkg/webui_adv/check_webuiadv_09_element_waiters.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | ["Apache-2.0"] | null | null | null | arjuna-samples/arjex/test/pkg/webui_adv/check_webuiadv_09_element_waiters.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | ["Apache-2.0"] | null | null | null | arjuna-samples/arjex/test/pkg/webui_adv/check_webuiadv_09_element_waiters.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | ["Apache-2.0"] | null | null | null |
# This file is a part of Arjuna
# Copyright 2015-2020 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time

from arjuna import *
@test
def check_wait_until_absent_nested(request, logged_in_wordpress):
# Should be validated in root element.
logged_in_wordpress.element(id="adminmenu").wait_until_absent(id="something")
try:
# It is present
logged_in_wordpress.element(id="adminmenu").wait_until_absent(tag="div")
except GuiWidgetPresentError as e:
print("Exception as Expected")
print(str(e))
except Exception as e:
raise Exception("Unexpected exception raise: ", str(e))
else:
raise Exception("Exception not raised.")
@test
def check_absent_nested_locator_max_wait(request, logged_in_wordpress):
try:
b = time.time()
logged_in_wordpress.element(id="adminmenu").wait_until_absent(tag="div")
except GuiWidgetPresentError:
print(time.time() - b)
try:
b = time.time()
logged_in_wordpress.element(id="adminmenu").wait_until_absent(tag="div", max_wait=10)
except GuiWidgetPresentError:
print(time.time() - b)
@test
def check_contains_nested(request, logged_in_wordpress):
# Should be validated in root element.
print(logged_in_wordpress.element(id="adminmenu").contains(tag="div"))
print(logged_in_wordpress.element(id="adminmenu").contains(id="something"))
@test
def check_contains_nested_locator_max_wait(request, logged_in_wordpress):
b = time.time()
logged_in_wordpress.element(id="adminmenu").contains(id="something")
print(time.time() - b)
b = time.time()
logged_in_wordpress.element(id="adminmenu").contains(id="something", max_wait=10)
print(time.time() - b) | 33.925373 | 93 | 0.721953 |
f74e4287c01a51060cf687db264b8e8ef4e28055 | 3,974 | py | Python | src/bintools/run/utils.py | Simonll/bintools | 009c9616545f3fc51b897da9bc53f35fb23c99eb | ["MIT"] | null | null | null | src/bintools/run/utils.py | Simonll/bintools | 009c9616545f3fc51b897da9bc53f35fb23c99eb | ["MIT"] | null | null | null | src/bintools/run/utils.py | Simonll/bintools | 009c9616545f3fc51b897da9bc53f35fb23c99eb | ["MIT"] | null | null | null |
import os
import shlex
import subprocess
import sys
from textwrap import dedent
from typing import Any
from typing import Optional
from typing import Tuple
def joint_kwargs(**kwargs) -> str:
return " ".join([k + " " + v for k, v in kwargs.items()])
def sub(
cmd: str, cwd: Optional[str] = None, stdout: bool = False, stder: bool = False
) -> Tuple[Any, Any]:
if cwd is not None:
print(cmd + " " + cwd)
else:
print(cmd)
cmd_ = shlex.split(cmd)
subp = subprocess.Popen(
cmd_,
stdout=subprocess.PIPE if stdout else None,
stderr=subprocess.PIPE if stder else None,
shell=False,
cwd=cwd,
)
return subp.communicate()
def run_shell_command(cmd, raise_errors=False, extra_env=None, cwd=None):
"""
Run the given command string via Bash with error checking.
Returns True if the command exits normally. Returns False if the command
exits with failure and "raise_errors" is False (the default). When
"raise_errors" is True, exceptions are rethrown.
If an *extra_env* mapping is passed, the provided keys and values are
overlayed onto the default subprocess environment.
"""
return ShellCommandRunner(
cmd, raise_errors=raise_errors, extra_env=extra_env, cwd=cwd
).run()
class ShellCommandRunner:
"""
Run the given command string via Bash with error checking.
TODO move to method docstrings. Returns True if the command exits normally. Returns False if the command
exits with failure and "raise_errors" is False (the default). When
"raise_errors" is True, exceptions are rethrown.
If an *extra_env* mapping is passed, the provided keys and values are
overlayed onto the default subprocess environment.
"""
def __init__(self, cmd, *, raise_errors=False, extra_env=None, cwd=None):
self.cmd = cmd
self.raise_errors = raise_errors
self.extra_env = extra_env
self.cwd = cwd
def run(self):
try:
self.invoke_command()
except Exception as error:
self.print_error_message(error)
if self.raise_errors:
raise error
return False
return True
def invoke_command(self):
return subprocess.check_output(
self.shell_executable + self.shell_args,
shell=False,
stderr=subprocess.STDOUT,
env=self.modified_env,
cwd=self.cwd,
)
@property
def shell_executable(self):
if os.name == "posix":
return ["/bin/bash"]
else:
# We try best effort on other systems. For now that means nt/java.
return ["env", "bash"]
@property
def shell_args(self):
return ["-c", "set -euo pipefail; " + self.cmd]
@property
def modified_env(self):
env = os.environ.copy()
if self.extra_env:
env.update(self.extra_env)
return env
def print_error_message(self, error):
if isinstance(error, subprocess.CalledProcessError):
message = f"{error.output}\nshell exited {error.returncode} when running: {self.cmd}"
if error.returncode == 127:
message += "\nAre you sure this program is installed?"
elif isinstance(error, FileNotFoundError):
shell = " and ".join(self.shell_executable)
message = f"""
Unable to run shell commands using {shell}!
Augur requires {shell} to be installed. Please open an issue on GitHub
<https://github.com/nextstrain/augur/issues/new> if you need assistance.
"""
else:
message = str(error)
self.print_error(message)
@staticmethod
def print_error(message):
"""Prints message to STDERR formatted with textwrap.dedent"""
print("\nERROR: " + dedent(message).lstrip("\n") + "\n", file=sys.stderr)
| 30.806202 | 109 | 0.623805 |
f74eb6a1211b850dd0c08d41c1bfb52d80014d5e | 7,431 | py | Python | tests/mock/responses/get_artist_albums.py | steinitzu/spoffy | 40cce0f00accbe006084a610d0d50396c21ec96c | ["Apache-2.0"] | 1 | 2019-04-24T19:50:03.000Z | 2019-04-24T19:50:03.000Z | tests/mock/responses/get_artist_albums.py | steinitzu/spoffy | 40cce0f00accbe006084a610d0d50396c21ec96c | ["Apache-2.0"] | 3 | 2019-10-11T20:31:57.000Z | 2020-04-13T16:06:43.000Z | tests/mock/responses/get_artist_albums.py | steinitzu/spoffy | 40cce0f00accbe006084a610d0d50396c21ec96c | ["Apache-2.0"] | null | null | null |
import json
_jalbums_relinked = """
{
"href": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6/albums?offset=0&limit=5&include_groups=album,single,compilation,appears_on&market=ES",
"items": [
{
"album_group": "album",
"album_type": "album",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/1vCWHaC5f2uS3yhpwWbIA6"
},
"href": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6",
"id": "1vCWHaC5f2uS3yhpwWbIA6",
"name": "Avicii",
"type": "artist",
"uri": "spotify:artist:1vCWHaC5f2uS3yhpwWbIA6"
}
],
"external_urls": {
"spotify": "https://open.spotify.com/album/7dqftJ3kas6D0VAdmt3k3V"
},
"href": "https://api.spotify.com/v1/albums/7dqftJ3kas6D0VAdmt3k3V",
"id": "7dqftJ3kas6D0VAdmt3k3V",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/a76f75493d039938b5dcfabbd5a6c1081f270a6c",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/1685a59f97e828e423a20ba080754c8d58466756",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/95191136789abd43fc7ad7b4ea5526eca2986c26",
"width": 64
}
],
"name": "Stories",
"release_date": "2015-10-02",
"release_date_precision": "day",
"total_tracks": 14,
"type": "album",
"uri": "spotify:album:7dqftJ3kas6D0VAdmt3k3V"
},
{
"album_group": "album",
"album_type": "album",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/1vCWHaC5f2uS3yhpwWbIA6"
},
"href": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6",
"id": "1vCWHaC5f2uS3yhpwWbIA6",
"name": "Avicii",
"type": "artist",
"uri": "spotify:artist:1vCWHaC5f2uS3yhpwWbIA6"
}
],
"external_urls": {
"spotify": "https://open.spotify.com/album/0h2knr6qpiAq0tV5ri5JMF"
},
"href": "https://api.spotify.com/v1/albums/0h2knr6qpiAq0tV5ri5JMF",
"id": "0h2knr6qpiAq0tV5ri5JMF",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/1da50cf44c25f8aad1b39ab640dff5137ee72dbb",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/baca6767c796817bded72c60f4a1b67f28cc75da",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/f8dc9e20bf7dd876bd646ce472c345a0fa9dfae3",
"width": 64
}
],
"name": "The Days / Nights",
"release_date": "2014-01-01",
"release_date_precision": "day",
"total_tracks": 4,
"type": "album",
"uri": "spotify:album:0h2knr6qpiAq0tV5ri5JMF"
},
{
"album_group": "album",
"album_type": "album",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/1vCWHaC5f2uS3yhpwWbIA6"
},
"href": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6",
"id": "1vCWHaC5f2uS3yhpwWbIA6",
"name": "Avicii",
"type": "artist",
"uri": "spotify:artist:1vCWHaC5f2uS3yhpwWbIA6"
}
],
"external_urls": {
"spotify": "https://open.spotify.com/album/0ignCov9foaLxuqND5GMtl"
},
"href": "https://api.spotify.com/v1/albums/0ignCov9foaLxuqND5GMtl",
"id": "0ignCov9foaLxuqND5GMtl",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/288730e58599d056b32a3934a0d519e6f1152265",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/141c2d2604305a3694ed9ef7d3a94e4c9e5f492a",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/c82fb92d61e5d25c47f4c819bd669944ee51abfc",
"width": 64
}
],
"name": "True: Avicii By Avicii",
"release_date": "2014-01-01",
"release_date_precision": "day",
"total_tracks": 9,
"type": "album",
"uri": "spotify:album:0ignCov9foaLxuqND5GMtl"
},
{
"album_group": "album",
"album_type": "album",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/1vCWHaC5f2uS3yhpwWbIA6"
},
"href": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6",
"id": "1vCWHaC5f2uS3yhpwWbIA6",
"name": "Avicii",
"type": "artist",
"uri": "spotify:artist:1vCWHaC5f2uS3yhpwWbIA6"
}
],
"external_urls": {
"spotify": "https://open.spotify.com/album/1s9tU91VJt4sU5owi29GD3"
},
"href": "https://api.spotify.com/v1/albums/1s9tU91VJt4sU5owi29GD3",
"id": "1s9tU91VJt4sU5owi29GD3",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/98c5699709d8c2497f34a177d159e1b1733f25bb",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/ccacb3b04352cc4ac7230aa02779171943717a10",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/7dabeff1e09d78b1e96d88be2b5509f00ef6ae5e",
"width": 64
}
],
"name": "True",
"release_date": "2013-01-01",
"release_date_precision": "day",
"total_tracks": 12,
"type": "album",
"uri": "spotify:album:1s9tU91VJt4sU5owi29GD3"
},
{
"album_group": "single",
"album_type": "single",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/1vCWHaC5f2uS3yhpwWbIA6"
},
"href": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6",
"id": "1vCWHaC5f2uS3yhpwWbIA6",
"name": "Avicii",
"type": "artist",
"uri": "spotify:artist:1vCWHaC5f2uS3yhpwWbIA6"
}
],
"external_urls": {
"spotify": "https://open.spotify.com/album/7Jx7doYIXITyR2LQB0Hvbc"
},
"href": "https://api.spotify.com/v1/albums/7Jx7doYIXITyR2LQB0Hvbc",
"id": "7Jx7doYIXITyR2LQB0Hvbc",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/2cc4d3aa28ebae67ed93040315342ca43aa7080d",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/38acfd3d2517343bc564d3093d54982aa3dc155c",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/b375ef37db875d690965f1afb969cc2c86e21219",
"width": 64
}
],
"name": "SOS",
"release_date": "2019-04-10",
"release_date_precision": "day",
"total_tracks": 1,
"type": "album",
"uri": "spotify:album:7Jx7doYIXITyR2LQB0Hvbc"
}
],
"limit": 5,
"next": "https://api.spotify.com/v1/artists/1vCWHaC5f2uS3yhpwWbIA6/albums?offset=5&limit=5&include_groups=album,single,compilation,appears_on&market=ES",
"offset": 0,
"previous": null,
"total": 382
}
"""
artist_albums_relinked = json.loads(_jalbums_relinked)
| 31.35443 | 155 | 0.543534 |
f74f17b1cd8e2279c5414ec03d52994e78e9f11a | 7,521 | py | Python | src/gcg/tf/tf_utils.py | gkahn13/GtS | 8186177de430d4bfef253bb0ea584ee60dc58d3a | ["MIT"] | 74 | 2019-02-12T04:52:01.000Z | 2021-06-11T00:19:45.000Z | src/gcg/tf/tf_utils.py | gkahn13/GtS | 8186177de430d4bfef253bb0ea584ee60dc58d3a | ["MIT"] | 5 | 2019-02-27T13:38:56.000Z | 2020-11-30T11:29:28.000Z | src/gcg/tf/tf_utils.py | gkahn13/GtS | 8186177de430d4bfef253bb0ea584ee60dc58d3a | ["MIT"] | 16 | 2019-02-20T12:09:25.000Z | 2022-02-09T12:49:09.000Z |
import os
import tensorflow as tf
######################
### Graph creation ###
######################
def create_session_and_graph(gpu_device=None, gpu_frac=None):
if gpu_device is None:
gpu_device = 0
if gpu_frac is None:
gpu_frac = 0.95
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_device)
tf_graph = tf.Graph()
if len(str(gpu_device)) > 0:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_frac)
config = tf.ConfigProto(
gpu_options=gpu_options,
log_device_placement=False,
allow_soft_placement=True,
)
else:
config = tf.ConfigProto(
device_count={'GPU': 0},
log_device_placement=False,
allow_soft_placement=True,
)
tf_sess = tf.Session(graph=tf_graph, config=config)
return tf_sess, tf_graph
##################
### Optimizing ###
##################
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
"""Minimized `objective` using `optimizer` w.r.t. variables in
`var_list` while ensure the norm of the gradients for each
variable is clipped to `clip_val`
"""
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return optimizer.apply_gradients(gradients)
##################
### Operations ###
##################
def spatial_soft_argmax(features, dtype=tf.float32):
"""
features shape is [N, H, W, C]
"""
N = tf.shape(features)[0]
val_shape = features.get_shape()
H, W, C = val_shape[1].value, val_shape[2].value, val_shape[3].value
features = tf.reshape(
tf.transpose(features, [0, 3, 1, 2]),
[-1, H * W])
softmax = tf.nn.softmax(features)
spatial_softmax = tf.transpose(tf.reshape(softmax, [N, C, H, W]), [0, 2, 3, 1])
spatial_softmax_pos = tf.expand_dims(spatial_softmax, -1)
# TODO shape [H, W, 1, 2]
# TODO H or W is 1
assert(H != 1 and W != 1)
delta_h = 2. / tf.cast(H - 1, dtype)
delta_w = 2. / tf.cast(W - 1, dtype)
ran_h = tf.tile(tf.expand_dims(tf.range(-1., 1. + delta_h, delta_h, dtype=dtype), 1), [1, W])
ran_w = tf.tile(tf.expand_dims(tf.range(-1., 1 + delta_w, delta_w, dtype=dtype), 0), [H, 1])
image_pos = tf.expand_dims(tf.stack([ran_h, ran_w], 2), 2)
spatial_soft_amax = tf.reduce_sum(spatial_softmax_pos * image_pos, axis=[1, 2])
shaped_ssamax = tf.reshape(spatial_soft_amax, [N, C * 2])
return shaped_ssamax
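# Illustrative shape check (editor addition; the tensor sizes are arbitrary).
# For an input of shape [N, H, W, C] the result is the softmax-weighted
# coordinate expectation per channel, flattened to shape [N, C * 2].
def _example_spatial_soft_argmax_shapes():
    feats = tf.random_normal([4, 8, 10, 32])
    return spatial_soft_argmax(feats)  # shape [4, 64]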
def repeat_2d(x, reps, axis):
assert(axis == 0 or axis == 1)
if axis == 1:
x = tf.transpose(x)
static_shape = list(x.get_shape())
dyn_shape = tf.shape(x)
x_repeat = tf.reshape(tf.tile(x, [1, reps]), (dyn_shape[0] * reps, dyn_shape[1]))
if static_shape[0].value is not None:
static_shape[0] = tf.Dimension(static_shape[0].value *reps)
x_repeat.set_shape(static_shape)
if axis == 1:
x_repeat = tf.transpose(x_repeat)
return x_repeat
def batch_outer_product(X, Y):
"""
:param X: [N, U]
:param Y: [N, V]
"""
# tf.assert_equal(tf.shape(X)[0], tf.shape(Y)[0])
X_batch = tf.expand_dims(X, 2) # [N, U, 1]
Y_batch = tf.expand_dims(Y, 1) # [N, 1, V]
results = tf.batch_matmul(X_batch, Y_batch) # [N, U, V]
return results
def batch_outer_product_2d(X, Y):
"""
:param X: [N, U]
:param Y: [N, V]
:return [N, U * V]
"""
U = X.get_shape()[1].value
V = Y.get_shape()[1].value
assert(U is not None)
assert(V is not None)
X_tile = tf.tile(X, (1, V))
Y_repeat = repeat_2d(Y, U, 1)
return tf.multiply(X_tile, Y_repeat)
def gather_2d(x, idxs):
"""
:param x: 2d tensor
:param idxs: 1d tensor indexing the columns of x to gather
:return: 1d tensor
"""
assert(len(x.get_shape()) == 2)
tf.assert_equal(tf.shape(x)[0], tf.shape(idxs)[0])
idxs = tf.transpose(tf.pack([tf.range(tf.shape(idxs)[0]), idxs]))
x_gather = tf.gather_nd(x, idxs)
return x_gather
def block_diagonal(matrices, dtype=tf.float32):
"""Constructs block-diagonal matrices from a list of batched 2D tensors.
Args:
matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
matrices with the same batch dimension).
dtype: Data type to use. The Tensors in `matrices` must match this dtype.
Returns:
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
"""
matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tf.Dimension(0)
blocked_cols = tf.Dimension(0)
batch_shape = tf.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = tf.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = tf.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = tf.shape(matrix)
row_before_length = current_column
current_column += matrix_shape[-1]
row_after_length = ret_columns - current_column
row_blocks.append(tf.pad(
tensor=matrix,
paddings=tf.concat(0,
[tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),
[(row_before_length, row_after_length)]])))
blocked = tf.concat(-2, row_blocks)
blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
return blocked
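# Illustrative usage sketch (editor addition; the block values are arbitrary).
def _example_block_diagonal():
    a = tf.ones([2, 2])
    b = 2.0 * tf.ones([3, 3])
    return block_diagonal([a, b])  # a single [5, 5] block-diagonal matrix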
def sample_categorical(p):
# TODO change to tf.distributions once update tf version
dist = tf.contrib.distributions.Categorical(probs=p)
sample = dist.sample()
return sample
###############
### Asserts ###
###############
def assert_shape(tensor, shape):
assert(len(tensor.get_shape()) == len(shape))
tensor_shape = tf.shape(tensor)
for i, s_i in enumerate(shape):
tf.assert_equal(tensor_shape[i], tf.cast(s_i, tf.int32))
def assert_equal_approx(tensor, value, eps=1e-5, name=None):
return tf.assert_equal(tf.cast(tf.abs(tensor - value) < 1e-5, tf.int32), 1, name=name)
if __name__ == '__main__':
import numpy as np
np.random.seed(0)
tf.set_random_seed(0)
### repeat_2d test
a = tf.constant(np.random.random((2, 4)))
a0 = repeat_2d(a, 2, 0)
a1 = repeat_2d(a, 2, 1)
sess = tf.Session()
a_eval, a0_eval, a1_eval = sess.run([a, a0, a1])
print('\nrepeat 2d test')
print('a:\n{0}'.format(a_eval))
print('a0\n{0}'.format(a0_eval))
print('a1\n{0}'.format(a1_eval))
### test batch outer
a = tf.constant(np.random.random((3, 2)))
b = tf.constant(np.random.randint(0, 2, (3, 2)).astype(np.float64))
ab_outer = tf.reshape(batch_outer_product(b, a), (a.get_shape()[0].value, -1))
ab_outer_2d = batch_outer_product_2d(a, b)
a_eval, b_eval, ab_outer_eval, ab_outer_2d_eval = sess.run([a, b, ab_outer, ab_outer_2d])
print('\nbatch outer test')
print('a:\n{0}'.format(a_eval))
print('b:\n{0}'.format(b_eval))
print('ab_outer:\n{0}'.format(ab_outer_eval))
print('ab_outer_2d:\n{0}'.format(ab_outer_2d_eval))
| 33.426667 | 97 | 0.622391 |
f74f3829da2705e46843cc6a81bb5eb4e6ca8f5d | 8,514 | py | Python | nova/virt/libvirt/storage/lvm.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null | nova/virt/libvirt/storage/lvm.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null | nova/virt/libvirt/storage/lvm.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | 1 | 2020-07-24T00:41:18.000Z | 2020-07-24T00:41:18.000Z |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import units
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.virt.libvirt import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def create_volume(vg, lv, size, sparse=False):
"""Create LVM image.
Creates a LVM image with given size.
:param vg: existing volume group which should hold this image
:param lv: name for this image (logical volume)
:size: size of image in bytes
:sparse: create sparse logical volume
"""
vg_info = get_volume_group_info(vg)
free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
' Only %(free_space)db available,'
' but %(size)d bytes required'
' by volume %(lv)s.') %
{'vg': vg,
'free_space': free_space,
'size': size,
'lv': lv})
if sparse:
preallocated_space = 64 * units.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_LW('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)d bytes,'
' but free space on volume group is'
' only %(free_space)db.'),
{'vg': vg,
'free_space': free_space,
'size': size,
'lv': lv})
cmd = ('lvcreate', '-L', '%db' % preallocated_space,
'--virtualsize', '%db' % size, '-n', lv, vg)
else:
check_size(vg, lv, size)
cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
utils.execute(*cmd, run_as_root=True, attempts=3)
def get_volume_group_info(vg):
"""Return free/used/total space info for a volume group in bytes
:param vg: volume group name
:returns: A dict containing:
:total: How big the filesystem is (in bytes)
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
"""
out, err = utils.execute('vgs', '--noheadings', '--nosuffix',
'--separator', '|',
'--units', 'b', '-o', 'vg_size,vg_free', vg,
run_as_root=True)
info = out.split('|')
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
return {'total': int(info[0]),
'free': int(info[1]),
'used': int(info[0]) - int(info[1])}
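# Illustrative example (editor addition; the byte counts are hypothetical):
# if `vgs` reports "53687091200|21474836480" for the volume group, this returns
# {'total': 53687091200, 'free': 21474836480, 'used': 32212254720}.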
def list_volumes(vg):
"""List logical volumes paths for given volume group.
:param vg: volume group name
:returns: Return a logical volume list for given volume group
: Data format example
: ['volume-aaa', 'volume-bbb', 'volume-ccc']
"""
out, err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
def volume_info(path):
"""Get logical volume info.
:param path: logical volume path
:returns: Return a dict object including info of given logical volume
: Data format example
: {'#Seg': '1', 'Move': '', 'Log': '', 'Meta%': '', 'Min': '-1',
: ...
: 'Free': '9983', 'LV': 'volume-aaa', 'Host': 'xyz.com',
: 'Active': 'active', 'Path': '/dev/vg/volume-aaa', '#LV': '3',
: 'Maj': '-1', 'VSize': '50.00g', 'VFree': '39.00g', 'Pool': '',
: 'VG Tags': '', 'KMaj': '253', 'Convert': '', 'LProfile': '',
: '#Ext': '12799', 'Attr': '-wi-a-----', 'VG': 'vg',
: ...
: 'LSize': '1.00g', '#PV': '1', '#VMdaCps': 'unmanaged'}
"""
out, err = utils.execute('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', path, run_as_root=True)
info = [line.split('|') for line in out.splitlines()]
if len(info) != 2:
raise RuntimeError(_("Path %s must be LVM logical volume") % path)
return dict(zip(*info))
def get_volume_size(path):
"""Get logical volume size in bytes.
:param path: logical volume path
:raises: processutils.ProcessExecutionError if getting the volume size
fails in some unexpected way.
:raises: exception.VolumeBDMPathNotFound if the volume path does not exist.
"""
try:
out, _err = utils.execute('blockdev', '--getsize64', path,
run_as_root=True)
except processutils.ProcessExecutionError:
if not utils.path_exists(path):
raise exception.VolumeBDMPathNotFound(path=path)
else:
raise
return int(out)
def _zero_volume(path, volume_size):
"""Write zeros over the specified path
:param path: logical volume path
:param size: number of zeros to write
"""
bs = units.Mi
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = volume_size
# The loop efficiently writes zeros using dd,
# and caters for versions of dd that don't have
# the easier to use iflag=count_bytes option.
while remaining_bytes:
zero_blocks = remaining_bytes / bs
seek_blocks = (volume_size - remaining_bytes) / bs
zero_cmd = ('dd', 'bs=%s' % bs,
'if=/dev/zero', 'of=%s' % path,
'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
zero_cmd += direct_flags
zero_cmd += sync_flags
if zero_blocks:
utils.execute(*zero_cmd, run_as_root=True)
remaining_bytes %= bs
bs /= units.Ki # Limit to 3 iterations
# Use O_DIRECT with initial block size and fdatasync otherwise
direct_flags = ()
sync_flags = ('conv=fdatasync',)
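# Worked example for the loop above (illustrative, assuming Python 2 integer
# division as in the original module): zeroing a 2621441-byte volume takes at
# most three dd passes with shrinking block sizes:
#   pass 1: bs=1MiB, count=2   -> 2097152 bytes zeroed (oflag=direct)
#   pass 2: bs=1KiB, count=512 ->  524288 bytes zeroed (conv=fdatasync)
#   pass 3: bs=1B,   count=1   ->   final byte zeroed  (conv=fdatasync)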
def clear_volume(path):
"""Obfuscate the logical volume.
:param path: logical volume path
"""
volume_clear = CONF.libvirt.volume_clear
if volume_clear == 'none':
return
volume_clear_size = int(CONF.libvirt.volume_clear_size) * units.Mi
try:
volume_size = get_volume_size(path)
except exception.VolumeBDMPathNotFound:
LOG.warning(_LW('ignoring missing logical volume %(path)s'),
{'path': path})
return
if volume_clear_size != 0 and volume_clear_size < volume_size:
volume_size = volume_clear_size
if volume_clear == 'zero':
# NOTE(p-draigbrady): we could use shred to do the zeroing
# with -n0 -z, however only versions >= 8.22 perform as well as dd
_zero_volume(path, volume_size)
elif volume_clear == 'shred':
utils.execute('shred', '-n3', '-s%d' % volume_size, path,
run_as_root=True)
def remove_volumes(paths):
"""Remove one or more logical volume."""
errors = []
for path in paths:
clear_volume(path)
lvremove = ('lvremove', '-f', path)
try:
utils.execute(*lvremove, attempts=3, run_as_root=True)
except processutils.ProcessExecutionError as exp:
errors.append(six.text_type(exp))
if errors:
raise exception.VolumesNotRemoved(reason=(', ').join(errors))
| 35.181818 | 79 | 0.576345 |
f74f442f8a8539ca9d7302fa625db30470e394c4 | 833 | py | Python | 1679. Max Number of K-Sum Pairs.py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | 4 | 2020-11-17T05:24:24.000Z | 2021-06-14T21:01:45.000Z | 1679. Max Number of K-Sum Pairs.py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | null | null | null | 1679. Max Number of K-Sum Pairs.py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | null | null | null | # Space - O(1) ; Time - O(n logn)
from typing import List
from collections import defaultdict
class Solution:
def maxOperations(self, nums: List[int], k: int) -> int:
nums.sort()
i = 0
j = len(nums)-1
op = 0
while i<j:
val = nums[i] + nums[j]
if val == k:
op += 1
i += 1
j -= 1
elif val > k:
j -= 1
else:
i += 1
return op
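# Illustrative trace (not part of the original solution): for nums = [1, 2, 3, 4]
# and k = 5, the sorted two-pointer scan matches (1, 4) and (2, 3), so the
# method returns 2; when the current sum overshoots k the right pointer moves
# left, and when it undershoots the left pointer moves right.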
# Space - O(n) ; Time - O(n)
class Solution:
def maxOperations(self, nums: List[int], k: int) -> int:
dairy = defaultdict(int)
pairs = 0
for i in nums:
if dairy[k-i] > 0:
pairs += 1
dairy[k-i] -= 1
else:
dairy[i] += 1
return pairs | 23.8 | 60 | 0.35054 |
f74f5822c0300aed1e291eb49337ea10c51fdf3e | 9,319 | py | Python | pytree/models/n_ary/modeling_n_ary.py | AntoineSimoulin/pytree | 9408799c4b8d4d59b1103e2205ffe7b250614f92 | [
"Apache-2.0"
] | 18 | 2021-11-10T13:34:02.000Z | 2021-12-17T13:28:00.000Z | pytree/models/n_ary/modeling_n_ary.py | AntoineSimoulin/pytree | 9408799c4b8d4d59b1103e2205ffe7b250614f92 | [
"Apache-2.0"
] | null | null | null | pytree/models/n_ary/modeling_n_ary.py | AntoineSimoulin/pytree | 9408799c4b8d4d59b1103e2205ffe7b250614f92 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch
import torch.nn.functional as F  # needed for F.normalize() in TreeEmbeddings.forward
from typing import List, Tuple, Optional, overload, Union, cast
from torch import Tensor
from transformers import BertModel
from pytree.data.packed_tree import PackedTree
class TreeEmbeddings(nn.Module):
def __init__(self, config):
super(TreeEmbeddings, self).__init__()
self.use_bert = config.use_bert
self.tune_bert = config.tune_bert
self.normalize_bert_embeddings = config.normalize_bert_embeddings
# embeddings
if self.use_bert:
self.bert = BertModel.from_pretrained(config.pretrained_model_name_or_path)
for name, param in self.bert.named_parameters():
param.requires_grad = self.tune_bert
else:
self.embeddings = nn.Embedding(config.vocab_size, config.embedding_size) # , sparse=True
nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)
self.embeddings.weight.requires_grad = True
if config.xavier_init:
self.xavier_init_weights()
def load_pretrained_embeddings(self, embeddings_weights, requires_grad=False):
self.embeddings = nn.Embedding.from_pretrained(embeddings_weights, sparse=True)
self.embeddings.weight.requires_grad = requires_grad
def forward(self, raw_inputs=None, packed_tree=None, bert_inputs=None):
if self.use_bert:
tokens_tensor, tokens_type_ids, attention_mask, sum_idx = bert_inputs
if self.tune_bert:
outputs = self.bert(input_ids=tokens_tensor,
token_type_ids=tokens_type_ids,
attention_mask=attention_mask)[0]
else:
with torch.no_grad():
outputs = self.bert(input_ids=tokens_tensor,
token_type_ids=tokens_type_ids,
attention_mask=attention_mask)[0]
if self.normalize_bert_embeddings:
outputs = F.normalize(outputs, p=2, dim=2)
cat_inputs = torch.reshape(outputs, (-1, outputs.shape[2]))
embeds = torch.index_select(cat_inputs, 0, sum_idx.long())
# embeds = torch.sigmoid(self.projection(embeds))
else:
# cat_inputs = torch.cat(raw_inputs)
embeds = self.embeddings(raw_inputs)
return embeds
def xavier_init_weights(self):
nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)
class NaryTree(nn.Module):
def __init__(self, config):
super(NaryTree, self).__init__()
self.config = config
self.embeddings = TreeEmbeddings(config)
if config.cell_type == 'lstm':
self.encoder = NaryTreeLSTMEncoder(config)
elif config.cell_type == 'gru':
self.encoder = NaryTreeGRUEncoder(config)
def forward(self, inputs):
embeds = self.embeddings(inputs['input_ids'])
hidden, h_root = self.encoder(embeds, inputs['tree_ids'].to(embeds.device), inputs['tree_ids_r'].to(embeds.device), inputs['tree_ids_l'].to(embeds.device))
return hidden, h_root
class TreeLSTM(nn.Module):
"""[summary]
Args:
nn ([type]): [description]
"""
def __init__(self, config):
super(TreeLSTM, self).__init__()
self.hidden_size = config.hidden_size
self.embedding_size = config.embedding_size
self.vocab_size = config.vocab_size
def xavier_init_weights(self):
# nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)
for name, param in self.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(param.data, gain=1.0)
if 'bias' in name:
param.data.fill_(0)
def forward(self,
input_ids: Union[Tensor, PackedTree],
tree_ids: Tensor = None,
tree_ids_r: Tensor = None,
tree_ids_l: Tensor = None,
hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Union[Tensor, PackedTree], Tuple[Tensor, Tensor]]:
# if isinstance(orig_input, PackedTrees):
batch_size = input_ids.size(0) # if self.batch_first else input.size(1)
n_steps = tree_ids.size(1)
sequence_length = input_ids.size(1)
# else:
# batch_size = input.size(0) if self.batch_first else input.size(1)
# n_steps = tree_ids.size(0)
if hx is None:
h_zeros = torch.zeros(batch_size, sequence_length, self.hidden_size,
dtype=input_ids.dtype, device=input_ids.device)
c_zeros = torch.zeros(batch_size, sequence_length, self.hidden_size,
dtype=input_ids.dtype, device=input_ids.device)
hx = (h_zeros, c_zeros)
for step in range(n_steps):
hx = self.tree_lstm_cell(input_ids, hx, tree_ids[:, step, :], tree_ids_r[:, step, :], tree_ids_l[:, step, :]) # .select(0, step)
roots = tree_ids[:, -1, :].max(axis=1)[0]
h_root = torch.gather(hx[0], 1, roots.unsqueeze(1).unsqueeze(2).repeat(1, 1, self.hidden_size)).squeeze()
return hx, h_root
class NaryTreeLSTMCell(nn.Module):
def __init__(self, config):
super(NaryTreeLSTMCell, self).__init__()
self.N = config.N
self.ioux = nn.Linear(config.embedding_size, 3 * config.hidden_size, bias=True)
self.iouh = nn.ModuleList([nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=False) for i in range(config.N)])
self.fx = nn.Linear(config.embedding_size, config.hidden_size, bias=True)
self.fh = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size, bias=False) for i in range(config.N * config.N)])
self.hidden_size = config.hidden_size
self.embedding_size = config.embedding_size
def forward(self, x, hx, tree_ids_d, tree_ids_dr, tree_ids_dl):
# import pdb; pdb.set_trace()
index = tree_ids_d.unsqueeze(-1).repeat(1, 1, self.hidden_size)
index_r = tree_ids_dr.unsqueeze(-1).repeat(1, 1, self.hidden_size)
index_l = tree_ids_dl.unsqueeze(-1).repeat(1, 1, self.hidden_size)
updated_nodes = torch.zeros_like(index).scatter_add_(1, index, torch.ones_like(index))
updated_nodes[:, 0, :] = 0
updated_nodes = updated_nodes.bool()
# iou_x = self.ioux(x)
iou = self.ioux(x)
# print('shape ioux', iou_x.shape)
iou_hr = self.iouh[0](hx[0])
# print('iouhr shape', iou_hr.shape)
iou_hl = self.iouh[1](hx[0])
# iou = iou_x + \
# torch.zeros_like(iou_x).scatter_add_(1, index_r, iou_hr) + \
# torch.zeros_like(iou_x).scatter_add_(1, index_l, iou_hl)
# print('index r shape', index_r.shape)
# print('index_r', index_r)
iou = torch.scatter_add(iou, 1, index_r.repeat(1, 1, 3), iou_hr)
iou = torch.scatter_add(iou, 1, index_l.repeat(1, 1, 3), iou_hl)
# iou = iou_x.scatter_add_(1, index_r, iou_hr)
# iou = iou_x.scatter_add_(1, index_l, iou_hl)
i, o, u = torch.split(iou, iou.size(-1) // 3, dim=-1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = self.fx(x).gather(1, index) + \
self.fh[0](hx[0]).gather(1, index_r) + \
self.fh[1](hx[0]).gather(1, index_r) + \
self.fh[2](hx[0]).gather(1, index_l) + \
self.fh[3](hx[0]).gather(1, index_l)
f = torch.sigmoid(f)
fc = torch.mul(f, hx[1])
# c = torch.mul(i, u) + torch.zeros_like(fc).scatter_add_(1, index, fc)
c = torch.mul(i, u)
c = c.scatter_add_(1, index, fc)
h = torch.mul(o, torch.tanh(c))
# h = hx[0].masked_scatter_(index.bool(), h)
h = torch.where(updated_nodes, h, hx[0]) # index.bool()
# c = hx[1].masked_scatter_(index.bool(), c)
c = torch.where(updated_nodes, c, hx[1]) # index.bool()
return h, c
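# Clarifying note (added; not in the original source): the cell returns full
# (batch, seq_len, hidden_size) h and c tensors. `updated_nodes` marks the
# positions that received child contributions in this step, and torch.where
# keeps the previous hx values everywhere else, so each call only refreshes
# the nodes whose children were just processed.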
class NaryTreeLSTMEncoder(TreeLSTM):
r"""
.. math::
:nowrap:
\begin{align}
\tilde{h}_j &= \sum_{k \in C(j)} h_k, \\
i_j &=\sigma \left( W^{(i)} x_j + U^{(i)} \tilde{h}_j + b^{(i)} \right), \\
f_{jk} &= \sigma\left( W^{(f)} x_j + U^{(f)} h_k + b^{(f)} \right),\\
o_j &= \sigma \left( W^{(o)} x_j + U^{(o)} \tilde{h}_j + b^{(o)} \right), \\
u_j &= \tanh\left( W^{(u)} x_j + U^{(u)} \tilde{h}_j + b^{(u)} \right), \\
c_j &= i_j \odot u_j + \sum_{k\in C(j)} f_{jk} \odot c_{k}, \\
h_j &= o_j \odot \tanh(c_j),
\end{align}
"""
def __init__(self, config):
"""
Class attributes:
- ``embedding_size``: `int`. Dimension of the embeddings.
- ``hidden_size``: `int`. Dimension of the Tree LSTM hidden layer
- ``vocab_size``: `int`. Dimension of the vocabulary.
- ``xavier_init``: `bool`, default 1. Whether to intiate networks weights using the glorot procedure.
"""
super(NaryTreeLSTMEncoder, self).__init__(config)
self.tree_lstm_cell = NaryTreeLSTMCell(config)
if config.xavier_init:
self.xavier_init_weights() | 43.344186 | 163 | 0.593841 |
f74f5aefcc5817037ddcd5c4e1f68206420349a7 | 292 | py | Python | paper_spider/paper_spider/pipelines.py | peterwilliams97/ToneRanger | 61ab8b5a96bbf3f82b8e6a07e470831189afff8c | [
"MIT"
] | null | null | null | paper_spider/paper_spider/pipelines.py | peterwilliams97/ToneRanger | 61ab8b5a96bbf3f82b8e6a07e470831189afff8c | [
"MIT"
] | null | null | null | paper_spider/paper_spider/pipelines.py | peterwilliams97/ToneRanger | 61ab8b5a96bbf3f82b8e6a07e470831189afff8c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class PaperSpiderPipeline(object):
def process_item(self, item, spider):
return item
| 24.333333 | 65 | 0.715753 |
f74f5df1c1719aeb789b373dcb8e714b822f3b7c | 699 | py | Python | runtests.py | apache/openwhisk-tutorial | f3d4e1ef8eb41462cff525df02dbbdd4998e471a | [
"Apache-2.0"
] | 2 | 2019-12-23T19:11:48.000Z | 2021-11-10T15:53:41.000Z | runtests.py | tspannhw/incubator-openwhisk-tutorial | f3d4e1ef8eb41462cff525df02dbbdd4998e471a | [
"Apache-2.0"
] | 5 | 2019-08-15T15:31:21.000Z | 2019-08-15T15:32:00.000Z | runtests.py | tspannhw/incubator-openwhisk-tutorial | f3d4e1ef8eb41462cff525df02dbbdd4998e471a | [
"Apache-2.0"
] | 2 | 2021-11-04T12:32:33.000Z | 2021-11-10T15:53:32.000Z | import sys
from django.conf import settings
settings.configure(
DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
ROOT_URLCONF='whisk_tutorial.urls',
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'whisk_tutorial',),
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
))
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner(verbosity=1)
failures = test_runner.run_tests(['whisk_tutorial', ])
if failures:
sys.exit(failures)
| 24.103448 | 63 | 0.65093 |
f74f8120e9b462eb6456b7cfccf5a20184b1bf35 | 16,367 | py | Python | sunpy/wcs/wcs.py | derdon/sunpy | 619102cd48c73a326c45263369446be9b74366e8 | [
"MIT"
] | null | null | null | sunpy/wcs/wcs.py | derdon/sunpy | 619102cd48c73a326c45263369446be9b74366e8 | [
"MIT"
] | null | null | null | sunpy/wcs/wcs.py | derdon/sunpy | 619102cd48c73a326c45263369446be9b74366e8 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import numpy as np
import sunpy.sun as sun
import astropy.units as u
rsun_meters = sun.constants.radius.si.value
__all__ = ['_convert_angle_units', 'convert_pixel_to_data', 'convert_hpc_hg',
'convert_data_to_pixel', 'convert_hpc_hcc', 'convert_hcc_hpc',
'convert_hcc_hg', 'convert_hg_hcc', 'proj_tan',
'convert_hg_hpc', 'convert_to_coord',
'get_center']
def _convert_angle_units(unit='arcsec'):
"""Determine the conversion factor between the data units and radians."""
if unit == 'degrees':
return np.deg2rad(1)
elif unit == 'arcmin':
return np.deg2rad(1) / 60.0
elif unit == 'arcsec':
return np.deg2rad(1) / (60 * 60.0)
elif unit == 'mas':
return np.deg2rad(1) / (60 * 60 * 1000.0)
else:
raise ValueError("The units specified are either invalid or is not supported at this time.")
def convert_pixel_to_data(size, scale, reference_pixel,
reference_coordinate, x=None, y=None):
"""Calculate the data coordinate for particular pixel indices.
Parameters
----------
size : 2d ndarray
Number of pixels in width and height.
scale : 2d ndarray
The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)
reference_pixel : 2d ndarray
The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)
reference_coordinate : 2d ndarray
The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)
x,y : int or ndarray
The pixel values at which data coordinates are requested. If none are given,
returns coordinates for every pixel.
Returns
-------
out : ndarray
The data coordinates at pixel (x,y).
Notes
-----
This function assumes a gnomic projection which is correct for a detector at the focus
of an optic observing the Sun.
Examples
--------
"""
cdelt = np.array(scale)
crpix = np.array(reference_pixel)
crval = np.array(reference_coordinate)
# first assume that coord is just [x,y]
if (x is None) and (y is None):
x, y = np.meshgrid(np.arange(size[0]), np.arange(size[1]))
# note that crpix[] counts pixels starting at 1
coordx = (x - (crpix[0] - 1)) * cdelt[0] + crval[0]
coordy = (y - (crpix[1] - 1)) * cdelt[1] + crval[1]
# Correct for Gnomic projection
coordx, coordy = proj_tan(coordx, coordy)
return coordx, coordy
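# Illustrative example (hypothetical WCS values, comment only): for a
# 1024x1024 image with 2.4 arcsec pixels, reference pixel (512, 512) and
# reference coordinate (0, 0) arcsec, pixel (512, 512) maps to (2.4, 2.4)
# arcsec (proj_tan is currently a pass-through):
#
#   x, y = convert_pixel_to_data([1024, 1024], [2.4, 2.4], [512, 512],
#                                [0, 0], x=512, y=512)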
def get_center(size, scale, reference_pixel, reference_coordinate):
"""Returns the center of the image in data coordinates.
Parameters
----------
size : 2d ndarray
Number of pixels in width and height.
scale : 2d ndarray
The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)
reference_pixel : 2d ndarray
The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)
reference_coordinate : 2d ndarray
The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)
Returns
-------
out : ndarray
The data coordinates
Examples
--------
"""
return scale * (size - 1 * u.pix) / 2. + reference_coordinate - (reference_pixel - 1 * u.pix) * scale
def convert_data_to_pixel(x, y, scale, reference_pixel, reference_coordinate):
"""Calculate the pixel indices for a given data coordinate.
Parameters
----------
x, y : float
Data coordinate in same units as reference coordinate
scale : 2d ndarray
The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)
reference_pixel : 2d ndarray
The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)
reference_coordinate : 2d ndarray
The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)
Returns
-------
out : ndarray
The pixel coordinates (x,y) at that data coordinate.
Examples
--------
"""
# TODO: Needs to check what coordinate system the data is given in
cdelt = np.array(scale)
crpix = np.array(reference_pixel)
crval = np.array(reference_coordinate)
# De-apply any tabular projections.
# coord = inv_proj_tan(coord)
# note that crpix[] counts pixels starting at 1
    pixelx = (x - crval[0]) / cdelt[0] + (crpix[0] - 1)
pixely = (y - crval[1]) / cdelt[1] + (crpix[1] - 1)
return pixelx, pixely
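# Illustrative round trip (same hypothetical WCS values as the sketch after
# convert_pixel_to_data):
#
#   px, py = convert_data_to_pixel(2.4, 2.4, [2.4, 2.4], [512, 512], [0, 0])
#
# returns (512.0, 512.0), recovering the pixel indices used in that example.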
def convert_hpc_hcc(x, y, dsun_meters=None, angle_units='arcsec', z=False):
"""Converts from Helioprojective-Cartesian (HPC) coordinates into
Heliocentric-Cartesian (HCC) coordinates. Returns all three dimensions, x, y, z in
meters.
Parameters
----------
x, y : float
Data coordinate in angle units (default is arcsec)
dsun_meters : float
Distance from the observer to the Sun in meters. Default is 1 AU.
angle_units : str
Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.
z : Bool
If true return the z coordinate as well.
Returns
-------
out : ndarray
The data coordinates (x,y,z) in heliocentric cartesian coordinates in meters.
Notes
-----
Implements Eq. (15) of Thompson (2006), A&A, 449, 791.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hpc_hcc(40.0, 32.0, z=True)
(28876152.176423457, 23100922.071266972, 694524220.8157959)
"""
c = np.array([_convert_angle_units(unit=angle_units),
_convert_angle_units(unit=angle_units)])
cosx = np.cos(x * c[0])
sinx = np.sin(x * c[0])
cosy = np.cos(y * c[1])
siny = np.sin(y * c[1])
if dsun_meters is None:
dsun_meters = sun.constants.au.si.value
elif isinstance(dsun_meters, u.Quantity):
dsun_meters = dsun_meters.si.value
q = dsun_meters * cosy * cosx
distance = q ** 2 - dsun_meters ** 2 + rsun_meters ** 2
# distance[np.where(distance < 0)] = np.sqrt(-1)
distance = q - np.sqrt(distance)
rx = distance * cosy * sinx
ry = distance * siny
rz = dsun_meters - distance * cosy * cosx
if np.all(z == True):
return rx, ry, rz
else:
return rx, ry
def convert_hcc_hpc(x, y, dsun_meters=None, angle_units='arcsec'):
"""Convert Heliocentric-Cartesian (HCC) to angular
Helioprojective-Cartesian (HPC) coordinates (in degrees).
Parameters
----------
x, y : float (meters)
Data coordinate in meters.
dsun_meters : float
Distance from the observer to the Sun in meters. Default is 1 AU.
angle_units : str
Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.
Returns
-------
out : ndarray
The data coordinates (x,y) in helioprojective cartesian coordinates in arcsec.
Notes
-----
Implements Eq. (16) of Thompson (2006), A&A, 449, 791.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hcc_hpc(28748691, 22998953)
(39.823439773829705, 31.858751644835717)
"""
# Calculate the z coordinate by assuming that it is on the surface of the Sun
z = np.sqrt(rsun_meters ** 2 - x ** 2 - y ** 2)
if dsun_meters is None:
dsun_meters = sun.constants.au.si.value
elif isinstance(dsun_meters, u.Quantity):
dsun_meters = dsun_meters.si.value
zeta = dsun_meters - z
distance = np.sqrt(x**2 + y**2 + zeta**2)
hpcx = np.rad2deg(np.arctan2(x, zeta))
hpcy = np.rad2deg(np.arcsin(y / distance))
if angle_units == 'arcsec':
hpcx = 60 * 60 * hpcx
hpcy = 60 * 60 * hpcy
elif angle_units == 'arcmin':
hpcx = 60 * hpcx
hpcy = 60 * hpcy
return hpcx, hpcy
def convert_hcc_hg(x, y, z=None, b0_deg=0, l0_deg=0, radius=False):
"""Convert from Heliocentric-Cartesian (HCC) (given in meters) to
Stonyhurst Heliographic coordinates (HG) given in degrees, with
radial output in meters.
Parameters
----------
x, y : float (meters)
Data coordinate in meters.
z : float (meters)
Data coordinate in meters. If None, then the z-coordinate is assumed
to be on the Sun.
b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
radius : Bool
If true, forces the output to return a triple of (lon, lat, r). If
false, return (lon, lat) only.
Returns
-------
out : ndarray (degrees, meters)
if radius is false, return the data coordinates (lon, lat). If
radius=True, return the data coordinates (lon, lat, r). The quantities
(lon, lat) are the heliographic coordinates in degrees. The quantity
'r' is the heliographic radius in meters.
Notes
-----
Implements Eq. (12) of Thompson (2006), A&A, 449, 791.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hcc_hg(230000.0,45000000.0,
... z=695508000.0 + 8000000.0, radius=True)
(0.01873188196651189, 3.6599471896203317, 704945784.41465974)
"""
if z is None:
z = np.sqrt(rsun_meters**2 - x**2 - y**2)
cosb = np.cos(np.deg2rad(b0_deg))
sinb = np.sin(np.deg2rad(b0_deg))
hecr = np.sqrt(x**2 + y**2 + z**2)
hgln = np.arctan2(x, z * cosb - y * sinb) + np.deg2rad(l0_deg)
hglt = np.arcsin((y * cosb + z * sinb) / hecr)
if radius:
return np.rad2deg(hgln), np.rad2deg(hglt), hecr
else:
return np.rad2deg(hgln), np.rad2deg(hglt)
def convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, occultation=False,
z=False, r=rsun_meters):
"""Convert from Stonyhurst Heliographic coordinates (given in degrees) to
Heliocentric-Cartesian coordinates (given in meters).
Parameters
----------
hglon_deg, hglat_deg : float (degrees)
Heliographic longitude and latitude in degrees.
b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
occultation : Bool
If true set all points behind the Sun (e.g. not visible) to Nan.
z : Bool
If true return the z coordinate as well.
r : float (meters)
Heliographic radius
Returns
-------
out : ndarray (meters)
The data coordinates in Heliocentric-Cartesian coordinates.
Notes
-----
Implements Eq. (11) of Thompson (2006), A&A, 449, 791, with the default
assumption that the value 'r' in Eq. (11) is identical to the radius of the
Sun.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hg_hcc(0.01873188196651189, 3.6599471896203317,
... r=704945784.41465974, z=True)
(230000.0, 45000000.0, 703508000.0)
"""
lon = np.deg2rad(hglon_deg)
lat = np.deg2rad(hglat_deg)
cosb = np.cos(np.deg2rad(b0_deg))
sinb = np.sin(np.deg2rad(b0_deg))
lon = lon - np.deg2rad(l0_deg)
cosx = np.cos(lon)
sinx = np.sin(lon)
cosy = np.cos(lat)
siny = np.sin(lat)
# Perform the conversion.
x = r * cosy * sinx
y = r * (siny * cosb - cosy * cosx * sinb)
zz = r * (siny * sinb + cosy * cosx * cosb)
if occultation:
x[zz < 0] = np.nan
y[zz < 0] = np.nan
if np.all(z == True):
return x, y, zz
else:
return x, y
def convert_hg_hpc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec',
occultation=False):
"""Convert from Heliographic coordinates (HG) to Helioprojective-Cartesian
(HPC).
Parameters
----------
hglon_deg, hglat_deg : float (degrees)
Heliographic longitude and latitude in degrees.
b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
occultation : Bool
If true set all points behind the Sun (e.g. not visible) to Nan.
dsun_meters : float (meters)
Distance between the observer and the Sun.
    angle_units : str
        Units of the output coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.
Returns
-------
out : ndarray (arcsec)
The data coordinates (x,y) in Helioprojective-Cartesian coordinates.
Notes
-----
Uses equations 11 and 16 in Thompson (2006), A&A, 449, 791-803.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hg_hpc(34.0, 45.0, b0_deg=-7.064078, l0_deg=0.0)
(380.05656560308898, 743.78281283290016)
"""
tempx, tempy = convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=b0_deg, l0_deg=l0_deg, occultation=occultation)
x, y = convert_hcc_hpc(tempx, tempy, dsun_meters=dsun_meters, angle_units=angle_units)
return x, y
def convert_hpc_hg(x, y, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):
"""Convert from Helioprojective-Cartesian (HPC) to Heliographic coordinates
(HG) in degrees.
Parameters
----------
x, y : float ()
Data coordinate in angle units.
b0 : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0 : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
dsun_meters : float (meters)
Distance between the observer and the Sun.
angle_units : str
Units used for input x and y. Default is arcsec.
Returns
-------
out : ndarray (degrees)
The data coordinates (hglongitude, hglatitude) in Heliographic coordinates.
Notes
-----
Uses equations 15 and 12 in Thompson (2006), A&A, 449, 791-803.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hpc_hg(382, 748, b0_deg=-7.064078, l0_deg=0.0)
(34.504653439914669, 45.443143275518182)
"""
tempx, tempy = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)
lon, lat = convert_hcc_hg(tempx, tempy, b0_deg=b0_deg, l0_deg=l0_deg)
return lon, lat
def proj_tan(x, y, force=False):
"""Applies the gnomonic (TAN) projection to intermediate relative
coordinates. This function is not currently implemented!"""
# if pixels are within 3 degrees of the Sun then skip the calculation unless
# force is True. This applies to all sdo images so this function is just
# here as a place holder for the future
# TODO: write proj_tan function
return x, y
def convert_to_coord(x, y, from_coord, to_coord, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):
"""Apply a coordinate transform to coordinates. Right now can only do hpc
to hcc to hg"""
if (from_coord == 'hcc') and (to_coord == 'hg'):
rx, ry = convert_hcc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg)
elif (from_coord == 'hpc') and (to_coord == 'hg'):
rx, ry = convert_hpc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)
elif (from_coord == 'hg') and (to_coord == 'hcc'):
rx, ry = convert_hg_hcc(x, y, b0_deg=b0_deg, l0_deg=l0_deg)
elif (from_coord == 'hcc') and (to_coord == 'hpc'):
rx, ry = convert_hcc_hpc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)
elif (from_coord == 'hg') and (to_coord == 'hpc'):
rx, ry = convert_hg_hpc(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)
elif (from_coord == 'hpc') and (to_coord == 'hcc'):
rx, ry = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)
return rx, ry
| 33.886128 | 117 | 0.637136 |
f74f9fdcdf2e4eef050033ba9110b8512ffecec0 | 13,880 | py | Python | django_xsede_warehouse/warehouse_views/views.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | 1 | 2019-10-29T22:50:29.000Z | 2019-10-29T22:50:29.000Z | django_xsede_warehouse/warehouse_views/views.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | null | null | null | django_xsede_warehouse/warehouse_views/views.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | null | null | null | from django.db.models import Q
from django.core.serializers import serialize
from django.shortcuts import render
from django.template.loader import get_template
from django.utils.encoding import uri_to_iri
from django.urls import reverse, get_script_prefix
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer
from rest_framework_xml.renderers import XMLRenderer
from rest_framework.response import Response
from rest_framework import status
from rdr_db.models import RDRResource
from rdr_db.filters import *
from glue2_db.models import ApplicationHandle
from xdcdb.models import *
from xdcdb.serializers import *
from rdr_db.serializers import *
from warehouse_views.serializers import Generic_Resource_Serializer, Software_Full_Serializer, Software_Community_Serializer, SGCI_Resource_Serializer_100
from xsede_warehouse.responses import MyAPIResponse
# Create your views here.
class RDR_List(APIView):
'''
### RDR resource list
Optional selection argument(s):
```
info_siteid=<siteid>
```
Optional response argument(s):
```
format={json,xml,html} (json default)
sort=<field>
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None, **kwargs):
returnformat = request.query_params.get('format', None)
active_resourceids = RDR_Active_Resources(affiliation='XSEDE', allocated=True, type='ALL', result='RESOURCEID')
if 'info_siteid' in self.kwargs:
try:
sort_by = request.GET.get('sort')
objects = RDRResource.objects.filter(rdr_type='resource').filter(info_siteid__exact=uri_to_iri(self.kwargs['info_siteid'])).order_by(sort_by)
except:
objects = RDRResource.objects.filter(rdr_type='resource').filter(info_siteid__exact=uri_to_iri(self.kwargs['info_siteid']))
else:
try:
sort_by = request.GET.get('sort')
objects = RDRResource.objects.filter(rdr_type='resource').order_by(sort_by)
except:
objects = RDRResource.objects.filter(rdr_type='resource').order_by('info_resourceid')
for o in objects:
o.Active = o.info_resourceid in active_resourceids
serializer = RDRResource_Serializer_Plus(objects, context={'request': request}, many=True)
if returnformat != 'html':
return Response(serializer.data)
else:
return render(request, 'warehouse_views/warehouse_resources.html', {'resource_list': serializer.data})
class RDR_List_Active(APIView):
'''
### RDR information about ACTIVE XSEDE resources, meaning:
Provider level is: Level 1 or Level 2
Status is: friendly, coming soon, pre-production, production, post-production
Excludes: Non-XSEDE, Provider Level 3, Status Decomissioned
Optional selection argument(s):
```
info_siteid=<siteid>
```
Optional response argument(s):
```
format={json,xml,html} (json default)
sort=<field>
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
returnformat = request.query_params.get('format', None)
try:
sort_by = request.GET.get('sort')
objects = RDR_Active_Resources(affiliation='XSEDE', allocated=True, type='SUB', result='OBJECTS').order_by('info_resourceid').order_by(sort_by)
except:
objects = RDR_Active_Resources(affiliation='XSEDE', allocated=True, type='SUB', result='OBJECTS').order_by('info_resourceid')
for o in objects:
o.Active = True
serializer = RDRResource_Serializer_Plus(objects, context={'request': request}, many=True)
if returnformat != 'html':
return Response(serializer.data)
else:
return render(request, 'warehouse_views/warehouse_resources.html', {'resource_list': serializer.data})
class Resource_List_XDCDB_Active(APIView):
'''
### XDCDB resource information about ACTIVE XSEDE resources, meaning:
Provider level is: Level 1 or Level 2
Status is: friendly, coming soon, pre-production, production, post-production
Excludes: Non-XSEDE, Provider Level 3, Status Decomissioned
Optional response argument(s):
```
format={json,xml,html} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
active_resourceids = RDR_Active_Resources(affiliation='XSEDE', allocated=True, type='ALL', result='RESOURCEID')
sort_by = request.GET.get('sort', 'ResourceID')
objects = TGResource.objects.filter(ResourceID__in=active_resourceids).order_by(sort_by)
returnformat = request.query_params.get('format', None)
serializer = XSEDEResource_Serializer(objects, many=True)
if returnformat != 'html':
return Response(serializer.data)
else:
context = {'xdcdb_list': serializer.data}
return render(request, 'warehouse_views/xdcdb_resources.html', context)
class Resource_List_CSA_Active(APIView):
'''
### RDR Community Software Area (CSA) information about ACTIVE XSEDE resources, meaning:
Provider level is: Level 1 or Level 2
Status is: friendly, coming soon, pre-production, production, post-production
Community Software Area Support is: True
Excludes: Non-XSEDE, Provider Level 3, Status Decomissioned, no CSA Support
Optional response argument(s):
```
format={json,xml,html} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
renderer_classes = (JSONRenderer,TemplateHTMLRenderer,XMLRenderer,)
def get(self, request, format=None):
csa_resources = RDR_Active_Resources(affiliation='XSEDE', allocated=False, type='SUB', result='OBJECTS')
objects = []
for res in csa_resources:
if str(res.other_attributes.get('community_software_area', '')).lower() == 'true':
objects.append(res)
serializer = RDR_CSA_Serializer(objects, many=True)
response_obj = {'results': serializer.data}
return MyAPIResponse(response_obj, template_name='warehouse_views/csa_resources.html')
class Resource_List_SGCI_Active_100(APIView):
'''
### SGCI Resource Description from RDR about ACTIVE XSEDE resources, meaning:
Provider level is: Level 1 or Level 2
Status is: friendly, coming soon, pre-production, production, post-production
Excludes: Non-XSEDE, Provider Level 3, Status Decomissioned
Optional response argument(s):
```
format={json,xml,html} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
renderer_classes = (JSONRenderer,TemplateHTMLRenderer,XMLRenderer,)
def get(self, request, format=None):
objects = RDR_Active_Resources_V2(affiliation='XSEDE', allocated=True, type='SUB', result='OBJECTS')
serializer = SGCI_Resource_Serializer_100(objects, many=True)
response_obj = {'results': serializer.data}
return MyAPIResponse(response_obj, template_name='warehouse_views/sgci_resources.html')
class RDR_Detail(APIView):
'''
### RDR detailed information
Required selection argument(s):
```
resourceid=<info_resourceid>
```
Optional response argument(s):
```
format={json,xml,html} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
renderer_classes = (JSONRenderer,TemplateHTMLRenderer,XMLRenderer,)
def get(self, request, format=None, **kwargs):
rdrid = request.GET.get('rdrid', kwargs.get('rdrid', None))
resourceid = request.GET.get('resourceid', kwargs.get('resourceid', None))
if not rdrid and not resourceid:
raise MyAPIException(code=status.HTTP_404_NOT_FOUND, detail='Missing RDR or Resource ID argument')
try:
if rdrid:
final_objects = [RDRResource.objects.get(pk=rdrid)]
else: # have resourceid, this may return no objects
final_objects = RDRResource.objects.filter(info_resourceid__exact=uri_to_iri(resourceid))
except RDRResource.DoesNotExist:
raise MyAPIException(code=status.HTTP_404_NOT_FOUND, detail='Specified RDR or Resource ID not found')
context = {}
serializer = Generic_Resource_Serializer(final_objects, context=context, many=True)
response_obj = {'results': serializer.data}
return MyAPIResponse(response_obj, template_name='warehouse_views/resource_details.html')
class Software_Full(APIView):
'''
### Software detailed information
Optional selection argument(s):
```
AppName=<appname>
resourceid=<info_resourceid>
ID=<id>
```
Optional response argument(s):
```
format={json,xml,html} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None, **kwargs):
if 'id' in self.kwargs:
try:
object = ApplicationHandle.objects.get(pk=uri_to_iri(self.kwargs['id'])) # uri_to_iri translates %xx
except ApplicationHandle.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = Software_Community_Serializer(object)
elif 'resourceid' in self.kwargs:
objects = ApplicationHandle.objects.filter(ResourceID__exact=self.kwargs['resourceid'])
serializer = Software_Community_Serializer(objects, many=True)
# elif 'siteid' in self.kwargs:
# objects = ApplicationHandle.objects.filter(ResourceID__exact=self.kwargs['siteid'])
# serializer = Software_Community_Serializer(objects, many=True)
elif 'appname' in self.kwargs:
objects = ApplicationHandle.objects.filter(ApplicationEnvironment__AppName__exact=uri_to_iri(self.kwargs['appname']))
serializer = Software_Community_Serializer(objects, many=True)
else:
objects = ApplicationHandle.objects.all()
serializer = Software_Community_Serializer(objects, many=True)
return Response(serializer.data)
class Software_XUP_v1_List(APIView):
'''
### XUP Software Detail of XSEDE SP Supported Software
Optional response argument(s):
```
format={json,xml} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
active_resourceids = RDR_Active_Resources(affiliation='XSEDE', allocated=True, type='SUB', result='RESOURCEID')
xsede_contact = 'https://info.xsede.org/wh1/xcsr-db/v1/supportcontacts/globalid/helpdesk.xsede.org/'
objects = ApplicationHandle.objects.filter(ResourceID__in=active_resourceids).filter(ApplicationEnvironment__EntityJSON__Extension__SupportContact__exact=xsede_contact)
serializer = Software_Full_Serializer(objects, many=True)
return Response(serializer.data)
class Community_Software_XUP_v1_List(APIView):
'''
### XUP Software Detail of Community Software Area (CSA) software
Optional response argument(s):
```
format={json,xml} (json default)
```
<a href="https://docs.google.com/document/d/1kh_0JCwRr7J2LiNlkQgfjopkHV4UbxB_UpXNhgt3vzc"
target="_blank">More API documentation</a>
'''
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
xsede_contact = 'https://info.xsede.org/wh1/xcsr-db/v1/supportcontacts/globalid/helpdesk.xsede.org/'
objects = ApplicationHandle.objects.exclude(ApplicationEnvironment__EntityJSON__Extension__SupportContact__exact=xsede_contact)
serializer = Software_Community_Serializer(objects, many=True)
return Response(serializer.data)
| 47.862069 | 176 | 0.666643 |
f74fa0f3a2763d5eaa33dddd189e55e34c5fb13b | 339 | py | Python | src/supervised_model.py | gdex1/irl-maxent | 86cfdaab6111cb65a866d384fbf811d812ee434d | [
"MIT"
] | null | null | null | src/supervised_model.py | gdex1/irl-maxent | 86cfdaab6111cb65a866d384fbf811d812ee434d | [
"MIT"
] | null | null | null | src/supervised_model.py | gdex1/irl-maxent | 86cfdaab6111cb65a866d384fbf811d812ee434d | [
"MIT"
] | null | null | null | class SupervisedModel:
def train(self, x, y):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
def predict_classes(self, x):
raise NotImplementedError
def save(self, path):
raise NotImplementedError
def load(self, path):
raise NotImplementedError | 22.6 | 33 | 0.654867 |
f74fb5ebce9c57818173ac4fea01bd68112477e2 | 1,955 | py | Python | tests/check_tests.py | snare/idiot | 840a8e0de30724bca8298ae1318906d3f273f3d9 | [
"MIT"
] | 145 | 2016-03-16T10:37:06.000Z | 2021-08-02T16:51:01.000Z | tests/check_tests.py | snare/idiot | 840a8e0de30724bca8298ae1318906d3f273f3d9 | [
"MIT"
] | 5 | 2016-03-27T11:27:51.000Z | 2018-07-03T08:00:47.000Z | tests/check_tests.py | snare/idiot | 840a8e0de30724bca8298ae1318906d3f273f3d9 | [
"MIT"
] | 16 | 2016-03-17T06:08:38.000Z | 2020-12-18T18:21:08.000Z | import nose
import idiot
import datetime
import time
def setup():
idiot.init()
def teardown():
pass
def test_snooze_intervals():
p = idiot.CheckPlugin()
assert p.snooze_intervals == idiot.config.snooze_intervals
class TestPlugin(idiot.CheckPlugin):
snooze_intervals = [1, 2, 3, 4]
p = TestPlugin()
assert p.snooze_intervals == [1, 2, 3, 4]
def test_snooze():
p = idiot.CheckPlugin()
assert p.snoozing is False
p.snooze_until = datetime.datetime.now() + datetime.timedelta(seconds=3600)
assert p.snoozing is True
class TestPlugin(idiot.CheckPlugin):
snooze_intervals = [3600, 6 * 3600, 'forever']
p = TestPlugin()
assert p.snooze_index == -1
assert p.snooze_until is None
assert not p.snoozing
p.snooze()
assert p.snooze_index == 0
assert p.snooze_until > datetime.datetime.now() + datetime.timedelta(seconds=3500)
assert p.snooze_until < datetime.datetime.now() + datetime.timedelta(seconds=3700)
assert p.snoozing
p.snooze()
assert p.snooze_index == 1
assert p.snooze_until > datetime.datetime.now() + datetime.timedelta(seconds=6 * 3600 - 100)
assert p.snooze_until < datetime.datetime.now() + datetime.timedelta(seconds=6 * 3600 + 100)
assert p.snoozing
p.snooze()
assert p.snooze_index == 2
assert p.snooze_until is True
assert p.snoozing
p.snooze()
assert p.snooze_index == 2
assert p.snooze_until is True
assert p.snoozing
def test_run_checks():
class FailCheck(idiot.CheckPlugin):
name = "Fail"
def run(self):
return (False, "Snooze this notification or the test will fail")
cm = idiot.CheckManager()
c = FailCheck()
assert not c.snoozing
cm.checks = [c]
cm.run_checks()
# clicking notifications isn't working from the test for some reason, works fine in the app. will fix later.
# time.sleep(5)
# assert c.snoozing
| 26.418919 | 112 | 0.6711 |
f74fe68e2cf07bb416f98ce860a10fc269f21232 | 336 | py | Python | test/mitmproxy/test_master.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | test/mitmproxy/test_master.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | test/mitmproxy/test_master.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | import asyncio
from mitmproxy.test.taddons import RecordingMaster
async def err():
raise RuntimeError
async def test_exception_handler():
m = RecordingMaster(None)
running = asyncio.create_task(m.run())
asyncio.create_task(err())
await m.await_log("Traceback", level="error")
m.shutdown()
await running
| 19.764706 | 50 | 0.717262 |
f74ff99a051b6ceeea8d0cba5506b01714e04e98 | 1,329 | py | Python | website/addons/s3/settings/defaults.py | dplorimer/osf | 9f3f400f62dc7bde18532949ed35bf1d3f6ec3d6 | [
"Apache-2.0"
] | null | null | null | website/addons/s3/settings/defaults.py | dplorimer/osf | 9f3f400f62dc7bde18532949ed35bf1d3f6ec3d6 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | website/addons/s3/settings/defaults.py | dplorimer/osf | 9f3f400f62dc7bde18532949ed35bf1d3f6ec3d6 | [
"Apache-2.0"
] | null | null | null | import json
import os
from website.settings import parent_dir
HERE = os.path.dirname(os.path.abspath(__file__))
STATIC_PATH = os.path.join(parent_dir(HERE), 'static')
MAX_RENDER_SIZE = (1024 ** 2) * 3
ALLOWED_ORIGIN = '*'
BUCKET_LOCATIONS = {}
ENCRYPT_UPLOADS_DEFAULT = True
# Load S3 settings used in both front and back end
with open(os.path.join(STATIC_PATH, 'settings.json')) as fp:
settings = json.load(fp)
BUCKET_LOCATIONS = settings.get('bucketLocations', {})
ENCRYPT_UPLOADS_DEFAULT = settings.get('encryptUploads', True)
OSF_USER = 'osf-user{0}'
OSF_USER_POLICY_NAME = 'osf-user-policy'
OSF_USER_POLICY = json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1392138408000",
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"*"
]
},
{
"Sid": "Stmt1392138440000",
"Effect": "Allow",
"Action": [
"iam:DeleteAccessKey",
"iam:DeleteUser",
"iam:DeleteUserPolicy"
],
"Resource": [
"*"
]
}
]
}
)
| 25.075472 | 66 | 0.485327 |
f7500e80768f8c0750263afa5156f9e8cf6f48ec | 22,399 | py | Python | Source/ThirdParty/libwebrtc/Source/PRESUBMIT.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 6 | 2021-07-05T16:09:39.000Z | 2022-03-06T22:44:42.000Z | Source/ThirdParty/libwebrtc/Source/PRESUBMIT.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | Source/ThirdParty/libwebrtc/Source/PRESUBMIT.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import json
import os
import re
import subprocess
import sys
# Directories that will be scanned by cpplint by the presubmit script.
CPPLINT_DIRS = [
'webrtc/api',
'webrtc/audio',
'webrtc/call',
'webrtc/common_video',
'webrtc/examples',
'webrtc/modules/audio_mixer',
'webrtc/modules/bitrate_controller',
'webrtc/modules/congestion_controller',
'webrtc/modules/pacing',
'webrtc/modules/remote_bitrate_estimator',
'webrtc/modules/rtp_rtcp',
'webrtc/modules/video_coding',
'webrtc/modules/video_processing',
'webrtc/tools',
'webrtc/video',
]
# These filters will always be removed, even if the caller specifies a filter
# set, as they are problematic or broken in some way.
#
# Justifications for each filter:
# - build/c++11 : Rvalue ref checks are unreliable (false positives),
# include file and feature blacklists are
# google3-specific.
# - whitespace/operators: Same as above (doesn't seem sufficient to eliminate
# all move-related errors).
BLACKLIST_LINT_FILTERS = [
'-build/c++11',
'-whitespace/operators',
]
# List of directories of "supported" native APIs. That means changes to headers
# will be done in a compatible way following this scheme:
# 1. Non-breaking changes are made.
# 2. The old APIs as marked as deprecated (with comments).
# 3. Deprecation is announced to discuss-webrtc@googlegroups.com and
# webrtc-users@google.com (internal list).
# 4. (later) The deprecated APIs are removed.
NATIVE_API_DIRS = (
'webrtc',
'webrtc/api',
'webrtc/media',
'webrtc/modules/audio_device/include',
'webrtc/pc',
)
# These directories should not be used but are maintained only to avoid breaking
# some legacy downstream code.
LEGACY_API_DIRS = (
'webrtc/base',
'webrtc/common_audio/include',
'webrtc/modules/audio_coding/include',
'webrtc/modules/audio_conference_mixer/include',
'webrtc/modules/audio_processing/include',
'webrtc/modules/bitrate_controller/include',
'webrtc/modules/congestion_controller/include',
'webrtc/modules/include',
'webrtc/modules/remote_bitrate_estimator/include',
'webrtc/modules/rtp_rtcp/include',
'webrtc/modules/rtp_rtcp/source',
'webrtc/modules/utility/include',
'webrtc/modules/video_coding/codecs/h264/include',
'webrtc/modules/video_coding/codecs/i420/include',
'webrtc/modules/video_coding/codecs/vp8/include',
'webrtc/modules/video_coding/codecs/vp9/include',
'webrtc/modules/video_coding/include',
'webrtc/system_wrappers/include',
'webrtc/voice_engine/include',
)
API_DIRS = NATIVE_API_DIRS[:] + LEGACY_API_DIRS[:]
def _RunCommand(command, cwd):
"""Runs a command and returns the output from that command."""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=cwd)
stdout = p.stdout.read()
stderr = p.stderr.read()
p.wait()
p.stdout.close()
p.stderr.close()
return p.returncode, stdout, stderr
def _VerifyNativeApiHeadersListIsValid(input_api, output_api):
"""Ensures the list of native API header directories is up to date."""
non_existing_paths = []
native_api_full_paths = [
input_api.os_path.join(input_api.PresubmitLocalPath(),
*path.split('/')) for path in API_DIRS]
for path in native_api_full_paths:
if not os.path.isdir(path):
non_existing_paths.append(path)
if non_existing_paths:
return [output_api.PresubmitError(
'Directories to native API headers have changed which has made the '
'list in PRESUBMIT.py outdated.\nPlease update it to the current '
'location of our native APIs.',
non_existing_paths)]
return []
api_change_msg = """
You seem to be changing native API header files. Please make sure that you:
1. Make compatible changes that don't break existing clients. Usually
this is done by keeping the existing method signatures unchanged.
2. Mark the old stuff as deprecated (see RTC_DEPRECATED macro).
3. Create a timeline and plan for when the deprecated stuff will be
removed. (The amount of time we give users to change their code
should be informed by how much work it is for them. If they just
need to replace one name with another or something equally
simple, 1-2 weeks might be good; if they need to do serious work,
up to 3 months may be called for.)
4. Update/inform existing downstream code owners to stop using the
deprecated stuff. (Send announcements to
discuss-webrtc@googlegroups.com and webrtc-users@google.com.)
5. Remove the deprecated stuff, once the agreed-upon amount of time
has passed.
Related files:
"""
def _CheckNativeApiHeaderChanges(input_api, output_api):
"""Checks to remind proper changing of native APIs."""
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if f.LocalPath().endswith('.h'):
for path in API_DIRS:
if os.path.dirname(f.LocalPath()) == path:
files.append(f)
if files:
return [output_api.PresubmitNotifyResult(api_change_msg, files)]
return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static ' +
'initialization into every file including the header. Instead, ' +
'#include <ostream>. See http://crbug.com/94794',
files)]
return []
def _CheckNoPragmaOnce(input_api, output_api):
"""Make sure that banned functions are not used."""
files = []
pattern = input_api.re.compile(r'^#pragma\s+once',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if files:
return [output_api.PresubmitError(
'Do not use #pragma once in header files.\n'
'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
files)]
return []
def _CheckNoFRIEND_TEST(input_api, output_api):
"""Make sure that gtest's FRIEND_TEST() macro is not used, the
FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be
used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes."""
problems = []
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
if 'FRIEND_TEST(' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('WebRTC\'s code should not use '
'gtest\'s FRIEND_TEST() macro. Include testsupport/gtest_prod_util.h and '
'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
def _IsLintWhitelisted(whitelist_dirs, file_path):
""" Checks if a file is whitelisted for lint check."""
for path in whitelist_dirs:
if os.path.dirname(file_path).startswith(path):
return True
return False
def _CheckApprovedFilesLintClean(input_api, output_api,
source_file_filter=None):
"""Checks that all new or whitelisted .cc and .h files pass cpplint.py.
This check is based on _CheckChangeLintsClean in
depot_tools/presubmit_canned_checks.py but has less filters and only checks
added files."""
result = []
# Initialize cpplint.
import cpplint
# Access to a protected member _XX of a client class
# pylint: disable=W0212
cpplint._cpplint_state.ResetErrorCounts()
lint_filters = cpplint._Filters()
lint_filters.extend(BLACKLIST_LINT_FILTERS)
cpplint._SetFilters(','.join(lint_filters))
# Create a platform independent whitelist for the CPPLINT_DIRS.
whitelist_dirs = [input_api.os_path.join(*path.split('/'))
for path in CPPLINT_DIRS]
# Use the strictest verbosity level for cpplint.py (level 1) which is the
# default when running cpplint.py from command line.
# To make it possible to work with not-yet-converted code, we're only applying
# it to new (or moved/renamed) files and files listed in LINT_FOLDERS.
verbosity_level = 1
files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
# Note that moved/renamed files also count as added.
if f.Action() == 'A' or _IsLintWhitelisted(whitelist_dirs, f.LocalPath()):
files.append(f.AbsoluteLocalPath())
for file_name in files:
cpplint.ProcessFile(file_name, verbosity_level)
if cpplint._cpplint_state.error_count > 0:
if input_api.is_committing:
# TODO(kjellander): Change back to PresubmitError below when we're
# confident with the lint settings.
res_type = output_api.PresubmitPromptWarning
else:
res_type = output_api.PresubmitPromptWarning
result = [res_type('Changelist failed cpplint.py check.')]
return result
def _CheckNoSourcesAbove(input_api, gn_files, output_api):
# Disallow referencing source files with paths above the GN file location.
source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]',
re.MULTILINE | re.DOTALL)
file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"')
violating_gn_files = set()
violating_source_entries = []
for gn_file in gn_files:
contents = input_api.ReadFile(gn_file)
for source_block_match in source_pattern.finditer(contents):
# Find all source list entries starting with ../ in the source block
# (exclude overrides entries).
for file_list_match in file_pattern.finditer(source_block_match.group(1)):
source_file = file_list_match.group(1)
if 'overrides/' not in source_file:
violating_source_entries.append(source_file)
violating_gn_files.add(gn_file)
if violating_gn_files:
return [output_api.PresubmitError(
'Referencing source files above the directory of the GN file is not '
'allowed. Please introduce new GN targets in the proper location '
'instead.\n'
'Invalid source entries:\n'
'%s\n'
'Violating GN files:' % '\n'.join(violating_source_entries),
items=violating_gn_files)]
return []
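# Illustrative GN snippet this check rejects (hypothetical paths):
#
#   sources = [
#     "../common_audio/foo.cc",  # reaches above the GN file's directory -> error
#     "bar.cc",                  # relative path inside the directory -> allowed
#   ]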
def _CheckNoMixingCAndCCSources(input_api, gn_files, output_api):
# Disallow mixing .c and .cc source files in the same target.
source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]',
re.MULTILINE | re.DOTALL)
file_pattern = input_api.re.compile(r'"(.*)"')
violating_gn_files = dict()
for gn_file in gn_files:
contents = input_api.ReadFile(gn_file)
for source_block_match in source_pattern.finditer(contents):
c_files = []
cc_files = []
for file_list_match in file_pattern.finditer(source_block_match.group(1)):
source_file = file_list_match.group(1)
if source_file.endswith('.c'):
c_files.append(source_file)
if source_file.endswith('.cc'):
cc_files.append(source_file)
if c_files and cc_files:
violating_gn_files[gn_file.LocalPath()] = sorted(c_files + cc_files)
if violating_gn_files:
return [output_api.PresubmitError(
'GN targets cannot mix .cc and .c source files. Please create a '
'separate target for each collection of sources.\n'
'Mixed sources: \n'
'%s\n'
'Violating GN files:' % json.dumps(violating_gn_files, indent=2),
items=violating_gn_files.keys())]
return []
def _CheckNoPackageBoundaryViolations(input_api, gn_files, output_api):
cwd = input_api.PresubmitLocalPath()
script_path = os.path.join('tools-webrtc', 'check_package_boundaries.py')
webrtc_path = os.path.join('webrtc')
command = [sys.executable, script_path, webrtc_path]
command += [gn_file.LocalPath() for gn_file in gn_files]
returncode, _, stderr = _RunCommand(command, cwd)
if returncode:
return [output_api.PresubmitError(
'There are package boundary violations in the following GN files:\n\n'
'%s' % stderr)]
return []
def _CheckGnChanges(input_api, output_api):
source_file_filter = lambda x: input_api.FilterSourceFile(
x, white_list=(r'.+\.(gn|gni)$',))
gn_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
if f.LocalPath().startswith('webrtc'):
gn_files.append(f)
result = []
if gn_files:
result.extend(_CheckNoSourcesAbove(input_api, gn_files, output_api))
result.extend(_CheckNoMixingCAndCCSources(input_api, gn_files, output_api))
result.extend(_CheckNoPackageBoundaryViolations(
input_api, gn_files, output_api))
return result
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# Copied from Chromium's src/PRESUBMIT.py.
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
checkdeps_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'buildtools', 'checkdeps')
if not os.path.exists(checkdeps_path):
return [output_api.PresubmitError(
'Cannot find checkdeps at %s\nHave you run "gclient sync" to '
'download Chromium and setup the symlinks?' % checkdeps_path)]
sys.path.append(checkdeps_path)
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for _, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _CheckChangeHasBugField(input_api, output_api):
"""Requires that the changelist have a BUG= field.
This check is stricter than the one in depot_tools/presubmit_canned_checks.py
since it fails the presubmit if the BUG= field is missing or doesn't contain
a bug reference.
"""
if input_api.change.BUG:
return []
else:
return [output_api.PresubmitError(
'The BUG=[bug number] field is mandatory. Please create a bug and '
'reference it using either of:\n'
' * https://bugs.webrtc.org - reference it using BUG=webrtc:XXXX\n'
' * https://crbug.com - reference it using BUG=chromium:XXXXXX')]
def _CheckJSONParseErrors(input_api, output_api):
"""Check that JSON files do not contain syntax errors."""
def FilterFile(affected_file):
return input_api.os_path.splitext(affected_file.LocalPath())[1] == '.json'
def GetJSONParseError(input_api, filename):
try:
contents = input_api.ReadFile(filename)
input_api.json.loads(contents)
except ValueError as e:
return e
return None
results = []
for affected_file in input_api.AffectedFiles(
file_filter=FilterFile, include_deletes=False):
parse_error = GetJSONParseError(input_api,
affected_file.AbsoluteLocalPath())
if parse_error:
results.append(output_api.PresubmitError('%s could not be parsed: %s' %
(affected_file.LocalPath(), parse_error)))
return results
def _RunPythonTests(input_api, output_api):
def join(*args):
return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)
test_directories = [
join('webrtc', 'tools', 'py_event_log_analyzer')
] + [
root for root, _, files in os.walk(join('tools-webrtc'))
if any(f.endswith('_test.py') for f in files)
]
tests = []
for directory in test_directories:
tests.extend(
input_api.canned_checks.GetUnitTestsInDirectory(
input_api,
output_api,
directory,
whitelist=[r'.+_test\.py$']))
return input_api.RunTests(tests, parallel=True)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
# Filter out files that are in objc or ios dirs from being cpplint-ed since
# they do not follow C++ lint rules.
black_list = input_api.DEFAULT_BLACK_LIST + (
r".*\bobjc[\\\/].*",
r".*objc\.[hcm]+$",
r"webrtc\/build\/ios\/SDK\/.*",
)
source_file_filter = lambda x: input_api.FilterSourceFile(x, None, black_list)
results.extend(_CheckApprovedFilesLintClean(
input_api, output_api, source_file_filter))
results.extend(input_api.canned_checks.RunPylint(input_api, output_api,
black_list=(r'^base[\\\/].*\.py$',
r'^build[\\\/].*\.py$',
r'^buildtools[\\\/].*\.py$',
r'^ios[\\\/].*\.py$',
r'^out.*[\\\/].*\.py$',
r'^testing[\\\/].*\.py$',
r'^third_party[\\\/].*\.py$',
r'^tools[\\\/].*\.py$',
# TODO(phoglund): should arguably be checked.
r'^tools-webrtc[\\\/]mb[\\\/].*\.py$',
r'^tools-webrtc[\\\/]valgrind[\\\/].*\.py$',
r'^xcodebuild.*[\\\/].*\.py$',),
disabled_warnings=['F0401', # Failed to import x
'E0611', # No package y in x
'W0232', # Class has no __init__ method
],
pylintrc='pylintrc'))
# TODO(nisse): talk/ is no more, so make below checks simpler?
# WebRTC can't use the presubmit_canned_checks.PanProjectChecks function since
# we need to have different license checks in talk/ and webrtc/ directories.
# Instead, hand-picked checks are included below.
# .m and .mm files are ObjC files. For simplicity we will consider .h files in
# ObjC subdirectories ObjC headers.
objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$')
# Skip long-lines check for DEPS and GN files.
build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS')
eighty_char_sources = lambda x: input_api.FilterSourceFile(x,
black_list=build_file_filter_list + objc_filter_list)
hundred_char_sources = lambda x: input_api.FilterSourceFile(x,
white_list=objc_filter_list)
results.extend(input_api.canned_checks.CheckLongLines(
input_api, output_api, maxlen=80, source_file_filter=eighty_char_sources))
results.extend(input_api.canned_checks.CheckLongLines(
input_api, output_api, maxlen=100,
source_file_filter=hundred_char_sources))
results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
input_api, output_api))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeTodoHasOwner(
input_api, output_api))
results.extend(_CheckNativeApiHeaderChanges(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoPragmaOnce(input_api, output_api))
results.extend(_CheckNoFRIEND_TEST(input_api, output_api))
results.extend(_CheckGnChanges(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(_CheckJSONParseErrors(input_api, output_api))
results.extend(_RunPythonTests(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(
input_api.canned_checks.CheckGNFormatted(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_VerifyNativeApiHeadersListIsValid(input_api, output_api))
results.extend(input_api.canned_checks.CheckOwners(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeWasUploaded(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(_CheckChangeHasBugField(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasTestField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://webrtc-status.appspot.com/current?format=json'))
return results
| 39.296491 | 80 | 0.700344 |
f7501f9ba404e8efcee17bc15a9901ed269dcbb4 | 1,038 | py | Python | passpie/utils.py | bcfurtado/passpie | 421c40a57ad5f55e3f14b323c929a2c41dfb5527 | ["MIT"] | 700 | 2015-05-02T20:58:37.000Z | 2022-01-20T18:49:17.000Z | passpie/utils.py | bcfurtado/passpie | 421c40a57ad5f55e3f14b323c929a2c41dfb5527 | ["MIT"] | 108 | 2015-05-02T02:56:39.000Z | 2021-07-01T21:46:10.000Z | passpie/utils.py | bcfurtado/passpie | 421c40a57ad5f55e3f14b323c929a2c41dfb5527 | ["MIT"] | 72 | 2015-05-03T02:50:24.000Z | 2021-06-09T03:38:51.000Z |
from contextlib import contextmanager
import errno
import os
import re
from random import SystemRandom
import tempfile
from rstr import Rstr
from ._compat import which
rstr = Rstr(SystemRandom())
import_module = __import__
def genpass(pattern=r'[\w]{32}'):
"""generates a password with random chararcters
"""
try:
return rstr.xeger(pattern)
except re.error as e:
raise ValueError(str(e))
@contextmanager
def mkdir_open(path, mode="r"):
try:
dir_path = os.path.dirname(path)
os.makedirs(dir_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
raise
with open(path, mode) as fd:
yield fd
def ensure_dependencies():
try:
assert which('gpg') or which('gpg2')
except AssertionError:
raise RuntimeError('GnuPG not installed. https://www.gnupg.org/')
def tempdir():
return tempfile.mkdtemp()
def touch(path):
with open(path, "w"):
pass
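# --- Illustrative usage (editor's addition, not part of the original passpie module) ---
# A minimal sketch of how genpass() and mkdir_open() can be exercised; the regex
# pattern and the file name below are assumptions made purely for this example.
if __name__ == "__main__":
    print(genpass(r"[A-Za-z0-9]{16}"))  # a 16-character alphanumeric password
    with mkdir_open(os.path.join(tempdir(), "notes", "demo.txt"), "w") as fd:
        fd.write("the parent directory is created on demand\n")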
| 18.872727 | 73 | 0.644509 |
f75039b9ae96b9a954037d553ff74ae27cd78a36 | 2,055 | py | Python | beers/migrations/0023_auto_20190408_2248.py | danroberts728/hsvdotbeer | 5b977bf4a7aab149ad56564b3adbb09424500308 | ["Apache-2.0"] | 18 | 2018-12-06T01:46:37.000Z | 2021-10-17T10:37:17.000Z | beers/migrations/0023_auto_20190408_2248.py | danroberts728/hsvdotbeer | 5b977bf4a7aab149ad56564b3adbb09424500308 | ["Apache-2.0"] | 194 | 2018-11-04T12:50:49.000Z | 2022-01-06T22:43:43.000Z | beers/migrations/0023_auto_20190408_2248.py | danroberts728/hsvdotbeer | 5b977bf4a7aab149ad56564b3adbb09424500308 | ["Apache-2.0"] | 7 | 2019-03-18T05:36:06.000Z | 2020-12-25T03:27:29.000Z |
# Generated by Django 2.2 on 2019-04-08 22:48
import django.contrib.postgres.fields.citext
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("beers", "0022_merge_common_endings"),
]
operations = [
migrations.CreateModel(
name="Style",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
django.contrib.postgres.fields.citext.CITextField(unique=True),
),
],
),
migrations.CreateModel(
name="StyleAlternateName",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
django.contrib.postgres.fields.citext.CITextField(unique=True),
),
(
"style",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="alternate_names",
to="beers.Style",
),
),
],
),
migrations.AddField(
model_name="beer",
name="new_style",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="beers",
to="beers.Style",
),
),
]
| 28.943662 | 83 | 0.400487 |
f7504ce3c2129d452007d4609794425ab78791e6 | 2,067 | py | Python | imagenet_training/models/simple_cnn.py | daniilgaltsev/ImageNet-Training | 9ca1d26cde07782398c7f366d5bf510c9e988236 | ["MIT"] | null | null | null | imagenet_training/models/simple_cnn.py | daniilgaltsev/ImageNet-Training | 9ca1d26cde07782398c7f366d5bf510c9e988236 | ["MIT"] | null | null | null | imagenet_training/models/simple_cnn.py | daniilgaltsev/ImageNet-Training | 9ca1d26cde07782398c7f366d5bf510c9e988236 | ["MIT"] | null | null | null |
"""A simple cnn model."""
import argparse
from collections import OrderedDict
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
class SimpleCNN(nn.Module):
"""A simple CNN model.
Args:
data_config: a dictionary containing information about data.
args (optional): args from argparser.
"""
def __init__(
self,
data_config: Dict[str, Any],
args: Optional[argparse.Namespace] = None,
):
super().__init__()
if args is None:
self.args = {}
else:
self.args = vars(args)
num_classes = len(data_config["mapping"])
self.cnn = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 32, kernel_size=3, padding=1, bias=False)),
("relu1", nn.ReLU(inplace=True)),
("bn1", nn.BatchNorm2d(32)),
("maxpool1", nn.MaxPool2d(kernel_size=2, stride=2)),
("conv2", nn.Conv2d(32, 64, kernel_size=3, bias=False)),
("relu2", nn.ReLU(inplace=True)),
("bn2", nn.BatchNorm2d(64)),
("maxpool2", nn.MaxPool2d(kernel_size=2, stride=2)),
("conv3", nn.Conv2d(64, 128, kernel_size=3, bias=False)),
("relu3", nn.ReLU(inplace=True)),
("bn3", nn.BatchNorm2d(128))
]))
self.head = nn.Sequential(OrderedDict([
("avgpool", nn.AdaptiveAvgPool2d(1)),
("flatten", nn.Flatten()),
("fc1", nn.Linear(128, 128)),
("relu1", nn.ReLU(inplace=True)),
("fc2", nn.Linear(128, num_classes))
]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Performs forward operation on a given tensor."""
x = self.cnn(x)
x = self.head(x)
return x
@staticmethod
def add_to_argparse(
parser: argparse.ArgumentParser,
main_parser: argparse.ArgumentParser # pylint: disable=unused-argument
) -> argparse.ArgumentParser:
"""Adds possible agrs to the given parser."""
return parser
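# --- Illustrative usage (editor's addition, not part of the original module) ---
# A quick smoke test of SimpleCNN; the 10-class mapping and the 64x64 input size
# are assumptions chosen only for this example.
if __name__ == "__main__":
    model = SimpleCNN({"mapping": list(range(10))})
    logits = model(torch.randn(2, 3, 64, 64))
    print(logits.shape)  # expected: torch.Size([2, 10])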
| 30.397059 | 79 | 0.56507 |
f7505d1b79d2a9cd8601e32e758458b01b198dd8 | 5,863 | py | Python | net_performance_comparison.py | SandhyaaGopchandani/PythonNetworkLibsComparion | 72db0cabecd0a9764663a044b19ef4dde843c402 | ["MIT"] | 1 | 2019-05-27T07:44:23.000Z | 2019-05-27T07:44:23.000Z | net_performance_comparison.py | SandhyaaGopchandani/PythonNetworkLibsComparion | 72db0cabecd0a9764663a044b19ef4dde843c402 | ["MIT"] | null | null | null | net_performance_comparison.py | SandhyaaGopchandani/PythonNetworkLibsComparion | 72db0cabecd0a9764663a044b19ef4dde843c402 | ["MIT"] | null | null | null |
import itertools
import numpy as np
from timeit import default_timer as timer
from graph_tool.all import *
import pickle
import networkx as nx
import matplotlib as mpl
#mpl.use('TkAgg')
import matplotlib.pyplot as plt
from igraph import *
def nodes_edges(num_nodes):
""" this function takes number of nodes and returns nodes and edge list"""
nodes = list(range(num_nodes))
edges = list(itertools.combinations(nodes, 2))
return nodes, edges
def create_graph_graphtool(node_num, edges):
""" this function creates graph object of graphtool library"""
    # Use graph-tool's Graph explicitly: the wildcard imports above bind `Graph` to
    # igraph's class (imported last), which would break this graph-tool-specific code.
    from graph_tool.all import Graph as GTGraph
    g = GTGraph(directed=False)
    g.add_vertex(node_num)  # adds node_num vertices in a single call
    g.add_edge_list(edges)
return g
def create_graph_igraph(nodes, edges):
""" this function creates graph object of igraph library"""
g = Graph(directed=False)
g.add_vertices(nodes)
g.add_edges(edges)
return g
def create_graph_networkx(nodes, edges):
""" this function creates graph object of networkx library"""
g = nx.Graph(directed=False)
g.add_nodes_from(nodes)
g.add_edges_from(edges)
return g
def get_edges(complete_edge_list, threshold=0.5):
""" this function randomnly picks the edges in graph based on probability. 0.5 means we want to include only 50% of random
edges of the total edges in the graph"""
edge_list = []
for key in complete_edge_list:
if np.random.random() < threshold:
edge_list.append(key)
return edge_list
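# Example (editor's addition, illustrative only): for a 4-node complete graph,
# get_edges(list(itertools.combinations(range(4), 2)), threshold=0.5) keeps each of
# the 6 candidate edges independently with probability 0.5, so a typical run returns
# about 3 of them (a different subset on every call).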
def multiple_graph(complete_edge_list, nodes, probs, netlib='networkx'):
"""this function times the various centrality measures calculated using three different network libararies.
The function computes various graph based on given probability of edges, computes the degree, closeness and betweenness
centrality measure and time those. At the end, it returns the list of timestamp for each cenrality. """
print("total possible edges:", len(complete_edge_list))
time_deg_central = []
time_closeness_central = []
time_between_central = []
num_edges = []
for prob in probs:
edges = get_edges(complete_edge_list, prob)
if netlib == 'graph-tool':
num_nodes = len(nodes)
graph = create_graph_graphtool(num_nodes, edges)
print(prob, len(graph.get_vertices()), len(graph.get_edges()))
num_edges.append(len(graph.get_edges()))
start = timer()
doc_degree_centralities = graph.get_out_degrees(nodes)
end = timer()
time_deg_central.append(end - start)
start = timer()
vertex_betweenness, edge_betweenness = graph_tool.centrality.betweenness(graph)
end = timer()
time_between_central.append(end - start)
start = timer()
vertex_closeness = graph_tool.centrality.closeness(graph)
end = timer()
time_closeness_central.append(end - start)
if netlib == 'networkx':
graph = create_graph_networkx(nodes, edges)
print(prob, len(graph.nodes()), len(graph.edges()))
num_edges.append(len(graph.edges()))
start = timer()
doc_degree_centralities = nx.algorithms.centrality.degree_centrality(graph)
end = timer()
time_deg_central.append(end - start)
start = timer()
vertex_betweenness = nx.algorithms.centrality.betweenness_centrality(graph)
end = timer()
time_between_central.append(end - start)
start = timer()
vertex_closeness = nx.algorithms.centrality.closeness_centrality(graph)
end = timer()
time_closeness_central.append(end - start)
if netlib == 'igraph':
graph = create_graph_igraph(nodes, edges)
print(prob, graph.vcount(), graph.ecount())
num_edges.append(graph.ecount())
start = timer()
doc_degree_centralities = np.array(graph.degree(nodes), dtype='f') / (graph.vcount() - 1)
end = timer()
time_deg_central.append(end - start)
start = timer()
normalization_factor = 2 / (float(graph.vcount() - 1) * float(graph.vcount() - 2))
vertex_betweenness = np.array(graph.betweenness(), dtype='f') * normalization_factor
end = timer()
time_between_central.append(end - start)
start = timer()
vertex_closeness = graph.closeness()
end = timer()
time_closeness_central.append(end - start)
return num_edges, time_deg_central, time_closeness_central, time_between_central
def plot_result(num_nodes, x, y1, y2, y3):
"""This function plots the timestamp for three centralities as a function of number of edges."""
plt.plot(x, y1)
plt.plot(x, y2)
plt.plot(x, y3)
plt.legend(['degree centrality', 'closeness centrality','betweenness centrality'], loc='upper left')
plt.xticks(x)
plt.title('with network of nodes '+str(num_nodes))
plt.xticks(rotation=90)
plt.xlabel('number of edges')
plt.ylabel('time (in seconds)')
plt.show()
if __name__ == '__main__':
num_nodes = 500 # number of nodes
nodes, complete_edge_list = nodes_edges(num_nodes)
threshold = [0.05, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
num_edges, time_deg_central, time_closeness_central, time_between_central = multiple_graph(complete_edge_list,
nodes, threshold,
netlib='igraph')
print(num_edges, time_deg_central, time_closeness_central, time_between_central)
    plot_result(num_nodes, num_edges, time_deg_central, time_closeness_central, time_between_central)
 | 38.827815 | 141 | 0.640287 |
f7507aab80565d9a085aad59b3fafd181bc5706f | 207 | py | Python | attic/specialization.py | IMS-workshop/cython-numba | fc9560752f15908d616666df78b1beb970a00a55 | ["BSD-3-Clause"] | null | null | null | attic/specialization.py | IMS-workshop/cython-numba | fc9560752f15908d616666df78b1beb970a00a55 | ["BSD-3-Clause"] | null | null | null | attic/specialization.py | IMS-workshop/cython-numba | fc9560752f15908d616666df78b1beb970a00a55 | ["BSD-3-Clause"] | null | null | null |
from numba import jit, int32
@jit(int32(int32, int32))
def f(x, y):
# A somewhat trivial example
return x + y
print(f)
# print(f(123, 123**30))
@jit(nopython=True)
def f(x, y):
return x + y
| 12.9375 | 32 | 0.613527 |
f750c7c66b6901abf81b3820cf58e07e28870047 | 5,197 | py | Python | oscar/lib/python2.7/site-packages/setuptools/__init__.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/setuptools/__init__.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/setuptools/__init__.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null |
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages',
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
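# --- Illustrative usage (editor's addition, not part of setuptools itself) ---
# A typical setup.py calls this alias exactly as documented in PackageFinder.find, e.g.:
#   from setuptools import setup, find_packages
#   setup(name="example", packages=find_packages(exclude=["tests", "tests.*"]))
# find_packages("src") would scan a src/ layout instead of the current directory.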
setup = distutils.core.setup
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
monkey.patch_all()
| 32.279503 | 80 | 0.630941 |
f750d0003606d9d7cd8b3218994de8998bf613fd | 3,108 | py | Python | xml_py/routedem.py | mlacayoemery/wps-server-invest | c3d5eca357218dac76225028fab491b357cbc343 | ["MIT"] | null | null | null | xml_py/routedem.py | mlacayoemery/wps-server-invest | c3d5eca357218dac76225028fab491b357cbc343 | ["MIT"] | null | null | null | xml_py/routedem.py | mlacayoemery/wps-server-invest | c3d5eca357218dac76225028fab491b357cbc343 | ["MIT"] | null | null | null |
import pywps
import pywps.validator.mode
import natcap.invest.routing.routedem
import tempfile
import os.path
import logging
import sys
class invest(pywps.Process):
def __init__(self):
inputs = [pywps.LiteralInput("calculate_downstream_distance",
"Calculate Downstream Distance",
data_type="boolean"),
pywps.LiteralInput("calculate_flow_accumulation",
"Calculate Flow Accumulation",
data_type="boolean"),
pywps.LiteralInput("calculate_slope",
"Calculate Slope",
data_type="boolean"),
pywps.LiteralInput("calculate_stream_threshold",
"Calculate Stream Threshold",
data_type="boolean"),
pywps.LiteralInput("threshold_flow_accumulation",
"Threshold Flow Accumulation",
data_type="integer"),
pywps.ComplexInput('dem',
'DEM',
supported_formats=[pywps.Format('image/tiff')],
mode=pywps.validator.mode.MODE.STRICT)]
outputs = [pywps.ComplexOutput('route',
'Route',
supported_formats=[pywps.Format('image/tiff')])]
super(invest, self).__init__(
self._handler,
identifier='routedem', #'natcap.invest.routing.routedem',
title='RouteDEM',
abstract='InVEST implementation of Tarboton (1997) d-infinity flow direction algorithm.',
version='None',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
logger = logging.getLogger("natcap.invest.routing.routedem")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
args = {
u'calculate_downstream_distance': request.inputs["calculate_downstream_distance"][0].data,
u'calculate_flow_accumulation': request.inputs["calculate_flow_accumulation"][0].data,
u'calculate_slope': request.inputs["calculate_slope"][0].data,
u'calculate_stream_threshold': request.inputs["calculate_stream_threshold"][0].data,
u'dem_path': request.inputs['dem'][0].file,
u'results_suffix': u'',
u'threshold_flow_accumulation': request.inputs["threshold_flow_accumulation"][0].data,
u'workspace_dir': tempfile.mkdtemp(),
}
natcap.invest.routing.routedem.execute(args)
response.outputs['route'].file = os.path.join(args[u'workspace_dir'],
"flow_direction.tif")
return response
| 43.774648 | 102 | 0.535071 |
f750d25a6569ab0b4f3ca88347c14468fa6858b4 | 7,567 | py | Python | cm/app/api_v1/my_calculation_module_directory/CM/__delete_if_tested__/CEDM/create_csv_results.py | HotMaps/renovation_effect | 469a01b4cc805256768d884a3a2ae4560770b734 | ["Apache-2.0"] | 1 | 2021-05-11T06:41:04.000Z | 2021-05-11T06:41:04.000Z | cm/app/api_v1/my_calculation_module_directory/CM/__delete_if_tested__/CEDM/create_csv_results.py | mayr-ethink/renovation_effect | 5b1fb81102b3c6ee531b719d8136ed9a343c2598 | ["Apache-2.0"] | 2 | 2020-06-04T20:19:36.000Z | 2020-06-04T20:19:36.000Z | cm/app/api_v1/my_calculation_module_directory/CM/__delete_if_tested__/CEDM/create_csv_results.py | mayr-ethink/renovation_effect | 5b1fb81102b3c6ee531b719d8136ed9a343c2598 | ["Apache-2.0"] | null | null | null |
import numpy as np
import os
import time
import sys
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.
abspath(__file__))))
if path not in sys.path:
sys.path.append(path)
import CM_intern.CEDM.modules.cyf.create_density_map as CDM
import CM_intern.CEDM.modules.Subfunctions as SF
from CM_intern.common_modules.exportLayerDict import export_layer as expLyr
import CM_intern.common_modules.cliprasterlayer as CRL
import pickle
TARGET_RESOLUTION = 100
def load_reference_raster_lyr(NUTS3_vector_path, strd_raster_path_full, outputpath, NUTS3_feat_id_LIST
, MOST_RECENT_CUT=""):
datatype_int = 'uint32'
#self.datatype_int16 = 'uint16'
datatype = "float32"
# common parameters
noDataValue = 0
#SaveLayerDict = {}
# Get current extent -> Use the Population 1x1km raster as reference Layer
key_field = "NUTS_ID"
REFERENCE_RASTER_LAYER_COORD, Layer_is_uncut = CRL.create_reference_raster_layer_origin_extent_of_vctr_feat(strd_raster_path_full
, NUTS3_vector_path, NUTS3_feat_id_LIST
, Vctr_key_field=key_field)
(REFERENCE_geotransform_obj, REFERENCE_RasterSize
, REFERENCE_RESOLUTION, REFERENCE_extent) = REFERENCE_RASTER_LAYER_COORD
REFERENCE_RasterResolution = REFERENCE_geotransform_obj[1]
gto_hr = list(REFERENCE_geotransform_obj)
gto_hr[1] = TARGET_RESOLUTION
gto_hr[5] = -TARGET_RESOLUTION
HighRes_gt_obj = tuple(gto_hr)
SaveLayerDict = {}
SaveLayerDict["Reference"] = ["%s/REFERENCE.tif" % outputpath, REFERENCE_geotransform_obj
, datatype_int
, np.ones((REFERENCE_RasterSize), dtype=datatype_int) , noDataValue]
# If data are the same as previous cut, then loading data can be done
LOAD_DATA_PREVIOUS = False
filename = MOST_RECENT_CUT
if os.path.exists(MOST_RECENT_CUT):
try:
with open(MOST_RECENT_CUT, 'rb') as fobject:
PREV_CUT = pickle.load(fobject)
fobject.close()
if PREV_CUT == REFERENCE_RASTER_LAYER_COORD:
LOAD_DATA_PREVIOUS = True
except Exception as e:
print("Cannot import %s"%MOST_RECENT_CUT)
print(e)
if LOAD_DATA_PREVIOUS != True:
with open(filename, 'wb') as fobject:
pickle.dump(REFERENCE_RASTER_LAYER_COORD, fobject, protocol=2)
fobject.close()
SaveLayerDict = expLyr(SaveLayerDict)
return (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS, Layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize)
def main(main_path, path_in_raw, preproccessed_input_path, prj_path_output):
st = time.time()
data_type = "uint8"
MOST_RECENT_CUT = main_path + prj_path_output + "/MOST_RECENT_CUT.pk"
prepro_path = main_path + preproccessed_input_path
org_data_path = main_path + path_in_raw
p_ = org_data_path
pi_ = org_data_path + "/vector_input_data/"
NUTS3_vector_path = pi_ + "/NUTS3.shp"
strd_raster_path_full = "%s/%s" %(org_data_path, "Population.tif")
temp_path = "/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output_2/" + os.sep + "Temp"
SoilSeal_path_full = "%s/%s" %(org_data_path, "_____ESM100m_final.tif")
#p_ = "/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output/"
sd = ""
print(os.path.exists(p_))
print(os.path.exists(pi_))
fn = []
NUTS3_feat_id_LIST = range(12000)
(REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS
, Ref_layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize) = \
load_reference_raster_lyr(NUTS3_vector_path,
strd_raster_path_full,
temp_path, NUTS3_feat_id_LIST
, MOST_RECENT_CUT)
for f_ in os.listdir("%s/%s" %(p_, sd)):
if f_.endswith(".tif"):
fn.append("%s/%s/%s" %(p_, sd, f_))
print(f_)
if "g100_clc12_v18_5" in f_.lower():
data, geotransform_obj = CRL.clip_raster_layer(fn[-1]
, REFERENCE_geotransform_obj
, REFERENCE_RasterSize)
data2 = np.zeros((data.shape),dtype="f4")
data3 = np.zeros_like(data2)
data4 = np.ones_like(data2) * 10.0 # 1000 m2
data2[data <= 21] = 10.0
data3[data <= 6] = 10.0
data3[data == 9] = 10.0
data3[data == 10] = 10.0
data3[data == 11] = 10.0
data3[data == 20] = 10.0
print(np.sum(data2))
print(np.sum(data3))
print(np.sum(data4))
elif "ESM100m_final" in f_:
data5, geotransform_obj = CRL.clip_raster_layer(fn[-1]
, REFERENCE_geotransform_obj
, REFERENCE_RasterSize)
            data5 *= 10.0/100.0 # in 1000 m2, data5 unit = %
print(np.sum(data5))
print(time.time() - st)
ARR_NUTS_ID_NUMBER, geotransform_obj = SF.rrl("%s/%s_id_number.tif" %(prepro_path, "NUTS3"), data_type="uint16")
print(time.time() - st)
ARR_LAU2_ID_NUMBER, geotransform_obj = SF.rrl("%s/%s_id_number.tif" %(prepro_path, "LAU2"), data_type="uint32")
print(time.time() - st)
#num_fn = len(fn)
num_fn = 4
RES_Table_NUTS = np.zeros((np.max(ARR_NUTS_ID_NUMBER)+1, num_fn+1), "f4")
RES_Table_LAU = np.zeros((np.max(ARR_LAU2_ID_NUMBER)+1, num_fn+1), "f4")
RES_Table_NUTS[:,0] = np.arange(RES_Table_NUTS.shape[0])
RES_Table_LAU[:,0] = np.arange(RES_Table_LAU.shape[0])
header = ["DI"]
#for i, f_ in enumerate(fn):
for i in range(num_fn):
#print(f_)
if i == 0:
data = data2.copy()
fn = "dauersiedlungsraum"
elif i == 1:
data = data3.copy()
fn = "dauersiedlungsraum_eng"
elif i == 2:
data = data4.copy()
fn = "flaeche"
else:
data = data5.copy()
fn = "ESM100m_final"
print(fn)
header.append(fn)
print(np.sum(data))
#header.append(f_.split("/")[-1])
#data, geotransform_obj = SF.rrl(f_, data_type=data_type)
TABLE_RESULTS_NUTS = CDM.CreateResultsTableperIndicator(data, ARR_NUTS_ID_NUMBER)
print(time.time() - st)
TABLE_RESULTS_LAU = CDM.CreateResultsTableperIndicator(data, ARR_LAU2_ID_NUMBER)
del data
print(time.time() - st)
RES_Table_NUTS[:, i+1] = TABLE_RESULTS_NUTS[:,-1]
RES_Table_LAU[:, i+1] = TABLE_RESULTS_LAU[:,-1]
#break
header = ",".join(header)
np.savetxt("%s/%s.csv" %(prepro_path, "__TABLE_RES_LAU2"), np.round(RES_Table_LAU, 3), delimiter=",", header=header, comments="")
np.savetxt("%s/%s.csv" %(prepro_path, "__TABLE_RES_NUTS"), np.round(RES_Table_NUTS, 3), delimiter=",", header=header, comments="")
print("DONE") | 38.805128 | 141 | 0.584776 |
f750d6ad1e45999c9e0206221e729359e5243f54 | 1,434 | py | Python | metrics.py | bhigy/discrete-repr | 3d4a4fc3833df3a1fa287c78c7402ce6df09abd4 | ["Apache-2.0"] | 1 | 2021-09-24T03:44:13.000Z | 2021-09-24T03:44:13.000Z | metrics.py | bhigy/discrete-repr | 3d4a4fc3833df3a1fa287c78c7402ce6df09abd4 | ["Apache-2.0"] | null | null | null | metrics.py | bhigy/discrete-repr | 3d4a4fc3833df3a1fa287c78c7402ce6df09abd4 | ["Apache-2.0"] | null | null | null |
from collections import Counter
from itertools import groupby
from math import log2
import numpy as np
def segments_start(array):
return [i for i in range(len(array)) if i == 0 or array[i] != array[i-1]]
def split_sequences(array, start):
end = start[1:] + [len(array)]
return [array[s:e] for s, e in zip(start, end)]
def coverage_top_1(labels, codes):
'''
    Computes the coverage of label segments by the most frequent co-occurring
code.
'''
start = segments_start(labels)
segments = split_sequences(codes, start)
return [sorted(Counter(s).values())[-1] / len(s) for s in segments]
def compute_joint_probability(x, y):
labels_x = np.unique(x)
idx_x = {v: i for i, v in enumerate(labels_x)}
labels_y = np.unique(y)
idx_y = {v: i for i, v in enumerate(labels_y)}
counts_xy = np.zeros([len(labels_x), len(labels_y)])
for xi, yi in zip(x, y):
counts_xy[idx_x[xi], idx_y[yi]] += 1
return labels_x, labels_y, counts_xy / len(x)
def conditional_entropy(x, y):
labels_x, labels_y, p_xy = compute_joint_probability(x, y)
p_y = np.sum(p_xy, axis=0)
h_x_y = 0
for i_x in range(len(labels_x)):
for i_y in range(len(labels_y)):
if p_xy[i_x, i_y] > 0:
h_x_y -= p_xy[i_x, i_y] * log2(p_xy[i_x, i_y] / p_y[i_y])
return h_x_y
def count_repetitions(array):
return [len(list(v)) for _, v in groupby(array)]
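# --- Illustrative usage (editor's addition, not part of the original module) ---
# Small, hand-checkable inputs: the labels [0, 0, 0, 1, 1] form two segments; the first
# is covered 2/3 by its most frequent code (5) and the second fully by code 3. For
# x = [0, 0, 1, 1] and y = [0, 1, 0, 1] the joint distribution is uniform, so the
# conditional entropy H(x|y) is exactly 1 bit.
if __name__ == "__main__":
    print(coverage_top_1([0, 0, 0, 1, 1], [5, 5, 7, 3, 3]))  # -> [0.666..., 1.0]
    print(conditional_entropy([0, 0, 1, 1], [0, 1, 0, 1]))   # -> 1.0
    print(count_repetitions([7, 7, 7, 2, 2, 9]))             # -> [3, 2, 1]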
| 28.68 | 77 | 0.642957 |
f750d86c9cddcc067645619ba4b167eff700690a | 1,490 | py | Python | oscar/lib/python2.7/site-packages/django/conf/locale/el/formats.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/django/conf/locale/el/formats.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/django/conf/locale/el/formats.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd/m/Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'd/m/Y P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', '%Y-%m-%d', # '25/10/2006', '25/10/06', '2006-10-25',
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| 38.205128 | 83 | 0.547651 |
f750e30c4991c96cda7995ac565e9bcfe9a00927 | 6,390 | py | Python | JumpScale9AYS/jobcontroller/Run.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | ["Apache-2.0"] | 4 | 2017-06-07T08:10:06.000Z | 2017-11-10T02:20:38.000Z | JumpScale9AYS/jobcontroller/Run.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | ["Apache-2.0"] | 242 | 2017-05-18T10:51:48.000Z | 2019-09-18T15:09:47.000Z | JumpScale9AYS/jobcontroller/Run.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | ["Apache-2.0"] | 5 | 2017-06-16T15:43:25.000Z | 2017-09-29T12:48:06.000Z |
import colored_traceback
from .RunStep import RunStep
from js9 import j
import json
import aiohttp
colored_traceback.add_hook(always=True)
RETRY_DELAY = [10, 30, 60, 300, 600, 1800] # time of each retry in seconds, total: 46min 40sec
class Run:
def __init__(self, model):
self.lastnr = 0
self.logger = j.atyourservice.server.logger
self.model = model
@property
def steps(self):
steps = []
for dbobj in self.model.dbobj.steps:
step = RunStep(self, dbobj.number, dbobj=dbobj)
steps.append(step)
return steps
@property
def aysrepo(self):
return j.atyourservice.server.aysRepos.get(path=self.model.dbobj.repo)
@property
def state(self):
return self.model.dbobj.state
@state.setter
def state(self, state):
self.model.dbobj.state = state
@property
def key(self):
return self.model.key
@property
def timestamp(self):
return self.model.epoch
def delete(self):
self.model.delete()
def newStep(self):
self.lastnr += 1
dbobj = self.model.stepNew()
step = RunStep(self, self.lastnr, dbobj=dbobj)
return step
@property
def services(self):
res = []
for step in self.steps:
res.extend(step.services)
return res
def hasServiceForAction(self, service, action):
for step in self.steps:
for job in step.jobs:
if job.model.dbobj.actionName != action:
continue
if job.service == service:
return True
return False
def get_retry_level(self):
"""
find lowest error level
"""
levels = set()
for step in self.steps:
for job in step.jobs:
service_action_obj = job.service.model.actions[job.model.dbobj.actionName]
if service_action_obj.errorNr > 0:
levels.add(service_action_obj.errorNr)
if levels:
return min(levels)
def get_retry_info(self):
runInfo = {}
retry = self.get_retry_level()
if retry and self.retries[0] != 0 and retry <= len(self.retries):
# capnp list to python list
remaining_retries = [x for x in self.retries]
runInfo = {
'retry-number': retry,
'duration': self.retries[retry - 1],
'remaining-retries': remaining_retries[retry:]
}
return runInfo
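    # Example (editor's addition, illustrative only): with the default RETRY_DELAY and a
    # lowest error level of 2, get_retry_info() returns
    # {'retry-number': 2, 'duration': 30, 'remaining-retries': [60, 300, 600, 1800]}.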
@property
def error(self):
out = "%s\n" % self
out += "***ERROR***\n\n"
for step in self.steps:
if step.state == "ERROR":
for key, action in step.actions.items():
if action.state == "ERROR":
out += "STEP:%s, ACTION:%s" % (step.nr, step.action)
out += self.db.get_dedupe("source",
action.model["source"]).decode()
out += str(action.result or '')
return out
@property
def callbackUrl(self):
return self.model.dbobj.callbackUrl
@callbackUrl.setter
def callbackUrl(self, callbackUrl):
self.model.dbobj.callbackUrl = callbackUrl
@property
def retries(self):
if not self.model.dbobj.retries:
# if dev mode will only use the first value of default config with default number of retries
if j.atyourservice.server.dev_mode:
self.model.dbobj.retries = [RETRY_DELAY[0]] * len(RETRY_DELAY)
else:
self.model.dbobj.retries = RETRY_DELAY
return self.model.dbobj.retries
@retries.setter
def retries(self, retries):
self.model.dbobj.retries = retries
def reverse(self):
ordered = []
for i, _ in enumerate(self.model.dbobj.steps):
orphan = self.model.dbobj.steps.disown(i)
ordered.append(orphan)
for i, step in enumerate(reversed(ordered)):
self.model.dbobj.steps.adopt(i, step)
self.model.dbobj.steps[i].number = i + 1
self.model.save()
def save(self):
self.model.save()
async def execute(self):
"""
        Execute runs all the steps contained in this run.
        If a step finishes with an error state, print the error of all jobs in
        that step that are in an error state, then raise an
        exception to stop execution
"""
self.state = 'running'
self.save()
try:
for step in self.steps:
await step.execute()
if step.state == 'error':
self.logger.error("error during execution of step {} in run {}".format(step.dbobj.number, self.key))
self.state = 'error'
err_msg = ''
for job in step.jobs:
if job.model.state == 'error':
if len(job.model.dbobj.logs) > 0:
log = job.model.dbobj.logs[-1]
print(job.str_error(log.log))
err_msg = log.log
raise j.exceptions.RuntimeError(err_msg)
self.state = 'ok'
except:
self.state = 'error'
raise
finally:
self.save()
if self.callbackUrl:
runInfo = self.get_retry_info()
data = {'runid': self.key, 'runState': self.state.__str__(), 'retries': runInfo}
async with aiohttp.ClientSession() as session:
await session.post(self.callbackUrl, headers={'Content-type': 'application/json'}, data=json.dumps(data))
def __repr__(self):
out = "RUN:%s\n" % (self.key)
out += "-------\n"
for step in self.steps:
out += "## step:%s\n\n" % step.dbobj.number
out += "%s\n" % step
return out
__str__ = __repr__
def __lt__(self, other):
return self.model.dbobj.lastModDate < other.model.dbobj.lastModDate
def __gt__(self, other):
return self.model.dbobj.lastModDate > other.model.dbobj.lastModDate
def __eq__(self, other):
return self.model.key == other.model.key
| 31.323529 | 125 | 0.54507 |
f751009df47057bacc6d0fbf6d1ab04c87998a96 | 3,617 | py | Python | sc2/paths.py | Matuiss2/python-sc2 | dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953 | ["MIT"] | 3 | 2019-01-20T19:37:25.000Z | 2019-01-29T10:21:48.000Z | sc2/paths.py | Matuiss2/python-sc2 | dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953 | ["MIT"] | 1 | 2019-02-13T06:42:26.000Z | 2019-03-11T14:04:43.000Z | sc2/paths.py | Matuiss2/python-sc2 | dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953 | ["MIT"] | 1 | 2019-02-13T05:44:16.000Z | 2019-02-13T05:44:16.000Z |
import logging
import os
import platform
import re
import subprocess
from pathlib import Path
logger = logging.getLogger(__name__)
BASEDIR = {
"Windows": "C:/Program Files (x86)/StarCraft II",
"Darwin": "/Applications/StarCraft II",
"Linux": "~/StarCraftII",
"WineLinux": "~/.wine/drive_c/Program Files (x86)/StarCraft II",
}
USERPATH = {
"Windows": "\\Documents\\StarCraft II\\ExecuteInfo.txt",
"Darwin": "/Library/Application Support/Blizzard/StarCraft II/ExecuteInfo.txt",
"Linux": None,
"WineLinux": None,
}
BINPATH = {
"Windows": "SC2_x64.exe",
"Darwin": "SC2.app/Contents/MacOS/SC2",
"Linux": "SC2_x64",
"WineLinux": "SC2_x64.exe",
}
CWD = {"Windows": "Support64", "Darwin": None, "Linux": None, "WineLinux": "Support64"}
PF = os.environ.get("SC2PF", platform.system())
def get_env():
# TODO: Linux env conf from: https://github.com/deepmind/pysc2/blob/master/pysc2/run_configs/platforms.py
return None
def get_runner_args(cwd):
if "WINE" in os.environ:
runner_dir = os.path.dirname(os.environ.get("WINE"))
# translate cwd from Unix to Windows path
win_cwd = subprocess.run(
[os.path.join(runner_dir, "winepath"), "-w", cwd],
capture_output=True,
text=True
).stdout.rstrip()
return [
os.environ.get("WINE"),
"start",
"/d",
win_cwd,
"/unix"
]
return []
def latest_executeble(versions_dir, base_build=None):
if base_build is None:
latest = max((int(p.name[4:]), p) for p in versions_dir.iterdir() if p.is_dir() and p.name.startswith("Base"))
else:
latest = (int(base_build[4:]), max(p for p in versions_dir.iterdir() if p.is_dir() and
p.name.startswith(str(base_build))))
version, path = latest
if version < 55958:
logger.critical(f"Your SC2 binary is too old. Upgrade to 3.16.1 or newer.")
exit(1)
return path / BINPATH[PF]
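# Example (editor's addition, illustrative only): on a default Windows install this
# typically resolves to something like
#   C:/Program Files (x86)/StarCraft II/Versions/Base75689/SC2_x64.exe
# where the Base* build number depends on the installed game version.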
class _MetaPaths(type):
""""Lazily loads paths to allow importing the library even if SC2 isn't installed."""
def __setup(self):
if PF not in BASEDIR:
logger.critical(f"Unsupported platform '{PF}'")
exit(1)
try:
base = os.environ.get("SC2PATH")
if base is None and USERPATH[PF] is not None:
einfo = str(Path.home().expanduser()) + USERPATH[PF]
if os.path.isfile(einfo):
with open(einfo) as f:
content = f.read()
if content:
base = re.search(r" = (.*)Versions", content).group(1)
if not os.path.exists(base):
base = None
if base is None:
base = BASEDIR[PF]
self.BASE = Path(base).expanduser()
self.EXECUTABLE = latest_executeble(self.BASE / "Versions")
self.CWD = self.BASE / CWD[PF] if CWD[PF] else None
self.REPLAYS = self.BASE / "Replays"
if (self.BASE / "maps").exists():
self.MAPS = self.BASE / "maps"
else:
self.MAPS = self.BASE / "Maps"
except FileNotFoundError as e:
logger.critical(f"SC2 installation not found: File '{e.filename}' does not exist.")
exit(1)
def __getattr__(self, attr):
self.__setup()
return getattr(self, attr)
class Paths(metaclass=_MetaPaths):
"""Paths for SC2 folders, lazily loaded using the above metaclass."""
| 31.72807 | 118 | 0.570086 |
f7510917c531028cdc2a3b2f96098d6833ccdc4d | 2,269 | py | Python | tests/apis/__init__.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | ["Unlicense"] | 6 | 2018-11-07T12:41:30.000Z | 2020-04-12T18:07:03.000Z | tests/apis/__init__.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | ["Unlicense"] | 1 | 2020-10-20T12:33:18.000Z | 2020-10-20T12:33:18.000Z | tests/apis/__init__.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | ["Unlicense"] | 10 | 2018-11-12T06:15:19.000Z | 2021-11-18T05:45:12.000Z |
"""
DRS Package for API unit tests
Copyright (c) 2018-2020 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from tests.apis import * # pylint: disable=wildcard-import
| 126.055556 | 844 | 0.806523 |
f7510e2e57e70b9bdc54e35893fb2f16f92ff9d7 | 1,917 | py | Python | tests/integration/import.py | doraskayo/buildstream | 1c72d4342ae7df360808de22c5e49f55dbb6bec6 | ["Apache-2.0"] | null | null | null | tests/integration/import.py | doraskayo/buildstream | 1c72d4342ae7df360808de22c5e49f55dbb6bec6 | ["Apache-2.0"] | null | null | null | tests/integration/import.py | doraskayo/buildstream | 1c72d4342ae7df360808de22c5e49f55dbb6bec6 | ["Apache-2.0"] | null | null | null |
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream import _yaml
from buildstream.testing import cli_integration as cli # pylint: disable=unused-import
from buildstream.testing.integration import walk_dir
pytestmark = pytest.mark.integration
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
def create_import_element(name, path, source, target, source_path):
element = {
"kind": "import",
"sources": [{"kind": "local", "path": source_path}],
"config": {"source": source, "target": target},
}
os.makedirs(os.path.dirname(os.path.join(path, name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(path, name))
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"source,target,path,expected",
[
("/", "/", "files/import-source", ["/test.txt", "/subdir", "/subdir/test.txt"]),
("/subdir", "/", "files/import-source", ["/test.txt"]),
("/", "/", "files/import-source/subdir", ["/test.txt"]),
(
"/",
"/output",
"files/import-source",
["/output", "/output/test.txt", "/output/subdir", "/output/subdir/test.txt"],
),
],
)
def test_import(cli, datafiles, source, target, path, expected):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
element_path = os.path.join(project, "elements")
element_name = "import/import.bst"
create_import_element(element_name, element_path, source, target, path)
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
    res = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout])
    assert res.exit_code == 0
assert set(walk_dir(checkout)) == set(expected)
| 32.491525 | 98 | 0.651539 |
f7513761a06800074ca2316b36ca1967570f5c68 | 876 | py | Python | Tools/ROS/entity_storage/scripts/test.py | ToyotaResearchInstitute/rad-robot | 9a47e4d88382719ab9bf142932fbcc83dcbcd665 | ["MIT"] | null | null | null | Tools/ROS/entity_storage/scripts/test.py | ToyotaResearchInstitute/rad-robot | 9a47e4d88382719ab9bf142932fbcc83dcbcd665 | ["MIT"] | null | null | null | Tools/ROS/entity_storage/scripts/test.py | ToyotaResearchInstitute/rad-robot | 9a47e4d88382719ab9bf142932fbcc83dcbcd665 | ["MIT"] | 2 | 2018-06-04T12:38:54.000Z | 2018-09-22T10:31:27.000Z |
#!/usr/bin/env python
import roslib; roslib.load_manifest('entity_storage')
from entity_storage.srv import *
from entity_storage.msg import *
import rospy
import sys
def get_coordinates(name):
rospy.wait_for_service('get_entity_coordinates')
try:
get_coords = rospy.ServiceProxy('get_entity_coordinates', entity_coordinates)
resp1 = get_coords(name)
return resp1.coordinate
except rospy.ServiceException, e:
print "Service call failed: %s" % e
def usage():
return "%s [entity_name]"%sys.argv[0]
if __name__ == "__main__":
if len(sys.argv) == 2:
entity_name = sys.argv[1]
else:
print usage()
sys.exit(1)
print "Requesting coordinates for %s" % entity_name
coordinate = get_coordinates(entity_name)
print "%s: X:%s Y:%s Theta:%s" % (entity_name, coordinate.x, coordinate.y, coordinate.theta) | 31.285714 | 98 | 0.68379 |
f7514c3a3eaf678ce7d778ca6c7f72f18d562af5 | 3,134 | py | Python | chapter12/djangoApplication/djangoApplication/settings.py | yangwawa0323/Learning-Python-Networking-Second-Edition | 5460fe4fb6acc5d0df19bf36e52ac09e9a11eb8b | ["MIT"] | 52 | 2018-12-17T19:33:06.000Z | 2022-03-25T18:14:02.000Z | chapter12/djangoApplication/djangoApplication/settings.py | barretthugh/Learning-Python-Networking-Second-Edition | 0f00b8b20c1c85e76754e47113dff8ca9e99d5ca | ["MIT"] | null | null | null | chapter12/djangoApplication/djangoApplication/settings.py | barretthugh/Learning-Python-Networking-Second-Edition | 0f00b8b20c1c85e76754e47113dff8ca9e99d5ca | ["MIT"] | 38 | 2018-12-18T09:08:43.000Z | 2022-02-06T02:53:05.000Z |
"""
Django settings for djangoApplication project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')bi66w41j_fkm*(ec+)7cckw1-rw(s5yd#ctexaz4hnhxz!5_5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djangoApp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoApplication.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoApplication.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.688525 | 91 | 0.700064 |
f751527f4010db2178948021484f40185235157b | 15,959 | py | Python | egg/zoo/basic_games/play.py | schlevik/EGG | 428d5aed3eb6fb0296f6856fb77b0a1cdceb33f1 | ["MIT"] | null | null | null | egg/zoo/basic_games/play.py | schlevik/EGG | 428d5aed3eb6fb0296f6856fb77b0a1cdceb33f1 | ["MIT"] | null | null | null | egg/zoo/basic_games/play.py | schlevik/EGG | 428d5aed3eb6fb0296f6856fb77b0a1cdceb33f1 | ["MIT"] | 1 | 2021-09-14T12:19:41.000Z | 2021-09-14T12:19:41.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import egg.core as core
from egg.core import Callback, Interaction, PrintValidationEvents
from egg.zoo.basic_games.architectures import DiscriReceiver, RecoReceiver, Sender
from egg.zoo.basic_games.data_readers import AttValDiscriDataset, AttValRecoDataset
# the following section specifies parameters that are specific to our games: we will also inherit the
# standard EGG parameters from https://github.com/facebookresearch/EGG/blob/master/egg/core/util.py
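# illustrative invocation (file names and most values here are made up; --game_type, --train_data,
# --validation_data, --n_attributes and --n_values are defined below, while flags such as
# --vocab_size, --max_len, --batch_size and --n_epochs come from the inherited core EGG options):
#   python -m egg.zoo.basic_games.play --game_type reco --train_data train.txt \
#       --validation_data valid.txt --n_attributes 4 --n_values 10 \
#       --vocab_size 10 --max_len 5 --batch_size 32 --n_epochs 50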
def get_params(params):
parser = argparse.ArgumentParser()
# arguments controlling the game type
parser.add_argument(
"--game_type",
type=str,
default="reco",
help="Selects whether to play a reco(nstruction) or discri(mination) game (default: reco)",
)
# arguments concerning the input data and how they are processed
parser.add_argument(
"--train_data", type=str, default=None, help="Path to the train data"
)
parser.add_argument(
"--validation_data", type=str, default=None, help="Path to the validation data"
)
# (the following is only used in the reco game)
parser.add_argument(
"--n_attributes",
type=int,
default=None,
help="Number of attributes in Sender input (must match data set, and it is only used in reco game)",
)
parser.add_argument(
"--n_values",
type=int,
default=None,
help="Number of values for each attribute (must match data set)",
)
parser.add_argument(
"--validation_batch_size",
type=int,
default=0,
help="Batch size when processing validation data, whereas training data batch_size is controlled by batch_size (default: same as training data batch size)",
)
# arguments concerning the training method
parser.add_argument(
"--mode",
type=str,
default="rf",
help="Selects whether Reinforce or Gumbel-Softmax relaxation is used for training {rf, gs} (default: rf)",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="GS temperature for the sender, only relevant in Gumbel-Softmax (gs) mode (default: 1.0)",
)
parser.add_argument(
"--sender_entropy_coeff",
type=float,
default=1e-1,
help="Reinforce entropy regularization coefficient for Sender, only relevant in Reinforce (rf) mode (default: 1e-1)",
)
# arguments concerning the agent architectures
parser.add_argument(
"--sender_cell",
type=str,
default="rnn",
help="Type of the cell used for Sender {rnn, gru, lstm} (default: rnn)",
)
parser.add_argument(
"--receiver_cell",
type=str,
default="rnn",
help="Type of the cell used for Receiver {rnn, gru, lstm} (default: rnn)",
)
parser.add_argument(
"--sender_hidden",
type=int,
default=10,
help="Size of the hidden layer of Sender (default: 10)",
)
parser.add_argument(
"--receiver_hidden",
type=int,
default=10,
help="Size of the hidden layer of Receiver (default: 10)",
)
parser.add_argument(
"--sender_embedding",
type=int,
default=10,
help="Output dimensionality of the layer that embeds symbols produced at previous step in Sender (default: 10)",
)
parser.add_argument(
"--receiver_embedding",
type=int,
default=10,
help="Output dimensionality of the layer that embeds the message symbols for Receiver (default: 10)",
)
# arguments controlling the script output
parser.add_argument(
"--print_validation_events",
default=False,
action="store_true",
help="If this flag is passed, at the end of training the script prints the input validation data, the corresponding messages produced by the Sender, and the output probabilities produced by the Receiver (default: do not print)",
)
args = core.init(parser, params)
return args
def main(params):
opts = get_params(params)
if opts.validation_batch_size == 0:
opts.validation_batch_size = opts.batch_size
print(opts, flush=True)
# the following if statement controls aspects specific to the two game tasks: loss, input data and architecture of the Receiver
# (the Sender is identical in both cases, mapping a single input attribute-value vector to a variable-length message)
if opts.game_type == "discri":
# the game object we will encounter below takes as one of its mandatory arguments a loss: a loss in EGG is expected to take as arguments the sender input,
# the message, the Receiver input, the Receiver output and the labels (although some of these elements might not actually be used by a particular loss);
# together with the actual loss computation, the loss function can return a dictionary with other auxiliary statistics: in this case, accuracy
def loss(
_sender_input,
_message,
_receiver_input,
receiver_output,
labels,
_aux_input,
):
# in the discriminative case, accuracy is computed by comparing the index with highest score in Receiver output (a distribution of unnormalized
            # probabilities over target positions) and the corresponding label read from input, indicating the ground-truth position of the target
acc = (receiver_output.argmax(dim=1) == labels).detach().float()
# similarly, the loss computes cross-entropy between the Receiver-produced target-position probability distribution and the labels
loss = F.cross_entropy(receiver_output, labels, reduction="none")
return loss, {"acc": acc}
        # the input data are read into DataLoader objects, which are pytorch constructs implementing standard data processing functionalities, such as shuffling
# and batching
# within our games, we implement dataset classes, such as AttValDiscriDataset, to read the input text files and convert the information they contain
# into the form required by DataLoader
        # look at the definition of the AttValDiscriDataset (the class to read discrimination game data) in data_readers.py for further details
# note that, for the training dataset, we first instantiate the AttValDiscriDataset object and then feed it to DataLoader, whereas for the
# validation data (confusingly called "test" data due to code heritage inertia) we directly declare the AttValDiscriDataset when instantiating
# DataLoader: the reason for this difference is that we need the train_ds object to retrieve the number of features of the input vectors
train_ds = AttValDiscriDataset(path=opts.train_data, n_values=opts.n_values)
train_loader = DataLoader(
train_ds, batch_size=opts.batch_size, shuffle=True, num_workers=1
)
test_loader = DataLoader(
AttValDiscriDataset(path=opts.validation_data, n_values=opts.n_values),
batch_size=opts.validation_batch_size,
shuffle=False,
num_workers=1,
)
# note that the number of features retrieved here concerns inputs after they are converted to 1-hot vectors
n_features = train_ds.get_n_features()
# we define here the core of the Receiver for the discriminative game, see the architectures.py file for details:
# note that this will be embedded in a wrapper below to define the full agent
receiver = DiscriReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)
else: # reco game
def loss(
sender_input, _message, _receiver_input, receiver_output, labels, _aux_input
):
# in the case of the recognition game, for each attribute we compute a different cross-entropy score
# based on comparing the probability distribution produced by the Receiver over the values of each attribute
# with the corresponding ground truth, and then averaging across attributes
# accuracy is instead computed by considering as a hit only cases where, for each attribute, the Receiver
# assigned the largest probability to the correct value
# most of this function consists of the usual pytorch madness needed to reshape tensors in order to perform these computations
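            # shape walk-through (illustrative): with batch size B, A attributes and V values,
            # receiver_output is reshaped to (B*A, V) so that cross-entropy is computed once per
            # attribute, and the per-attribute losses are then reshaped to (B, A) and averaged,
            # giving a single loss value per batch item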
n_attributes = opts.n_attributes
n_values = opts.n_values
batch_size = sender_input.size(0)
receiver_output = receiver_output.view(batch_size * n_attributes, n_values)
receiver_guesses = receiver_output.argmax(dim=1)
correct_samples = (
(receiver_guesses == labels.view(-1))
.view(batch_size, n_attributes)
.detach()
)
acc = (torch.sum(correct_samples, dim=-1) == n_attributes).float()
labels = labels.view(batch_size * n_attributes)
loss = F.cross_entropy(receiver_output, labels, reduction="none")
loss = loss.view(batch_size, -1).mean(dim=1)
return loss, {"acc": acc}
# again, see data_readers.py in this directory for the AttValRecoDataset data reading class
train_loader = DataLoader(
AttValRecoDataset(
path=opts.train_data,
n_attributes=opts.n_attributes,
n_values=opts.n_values,
),
batch_size=opts.batch_size,
shuffle=True,
num_workers=1,
)
test_loader = DataLoader(
AttValRecoDataset(
path=opts.validation_data,
n_attributes=opts.n_attributes,
n_values=opts.n_values,
),
batch_size=opts.validation_batch_size,
shuffle=False,
num_workers=1,
)
# the number of features for the Receiver (input) and the Sender (output) is given by n_attributes*n_values because
# they are fed/produce 1-hot representations of the input vectors
n_features = opts.n_attributes * opts.n_values
        # we define here the core of the receiver for the reconstruction game, see the architectures.py file for details
# this will be embedded in a wrapper below to define the full architecture
receiver = RecoReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)
# we are now outside the block that defined game-type-specific aspects of the games: note that the core Sender architecture
# (see architectures.py for details) is shared by the two games (it maps an input vector to a hidden layer that will be use to initialize
# the message-producing RNN): this will also be embedded in a wrapper below to define the full architecture
sender = Sender(n_hidden=opts.sender_hidden, n_features=n_features)
# now, we instantiate the full sender and receiver architectures, and connect them and the loss into a game object
# the implementation differs slightly depending on whether communication is optimized via Gumbel-Softmax ('gs') or Reinforce ('rf', default)
if opts.mode.lower() == "gs":
# in the following lines, we embed the Sender and Receiver architectures into standard EGG wrappers that are appropriate for Gumbel-Softmax optimization
# the Sender wrapper takes the hidden layer produced by the core agent architecture we defined above when processing input, and uses it to initialize
# the RNN that generates the message
sender = core.RnnSenderGS(
sender,
vocab_size=opts.vocab_size,
embed_dim=opts.sender_embedding,
hidden_size=opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
temperature=opts.temperature,
)
# the Receiver wrapper takes the symbol produced by the Sender at each step (more precisely, in Gumbel-Softmax mode, a function of the overall probability
        # of non-eos symbols up to the step is used), maps it to a hidden layer through an RNN, and feeds this hidden layer to the
# core Receiver architecture we defined above (possibly with other Receiver input, as determined by the core architecture) to generate the output
receiver = core.RnnReceiverGS(
receiver,
vocab_size=opts.vocab_size,
embed_dim=opts.receiver_embedding,
hidden_size=opts.receiver_hidden,
cell=opts.receiver_cell,
)
game = core.SenderReceiverRnnGS(sender, receiver, loss)
# callback functions can be passed to the trainer object (see below) to operate at certain steps of training and validation
# for example, the TemperatureUpdater (defined in callbacks.py in the core directory) will update the Gumbel-Softmax temperature hyperparameter
# after each epoch
callbacks = [core.TemperatureUpdater(agent=sender, decay=0.9, minimum=0.1)]
else: # NB: any other string than gs will lead to rf training!
# here, the interesting thing to note is that we use the same core architectures we defined above, but now we embed them in wrappers that are suited to
        # Reinforce-based optimization
sender = core.RnnSenderReinforce(
sender,
vocab_size=opts.vocab_size,
embed_dim=opts.sender_embedding,
hidden_size=opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
)
receiver = core.RnnReceiverDeterministic(
receiver,
vocab_size=opts.vocab_size,
embed_dim=opts.receiver_embedding,
hidden_size=opts.receiver_hidden,
cell=opts.receiver_cell,
)
game = core.SenderReceiverRnnReinforce(
sender,
receiver,
loss,
sender_entropy_coeff=opts.sender_entropy_coeff,
receiver_entropy_coeff=0,
)
callbacks = []
# we are almost ready to train: we define here an optimizer calling standard pytorch functionality
optimizer = core.build_optimizer(game.parameters())
# in the following statement, we finally instantiate the trainer object with all the components we defined (the game, the optimizer, the data
# and the callbacks)
    if opts.print_validation_events:
# we add a callback that will print loss and accuracy after each training and validation pass (see ConsoleLogger in callbacks.py in core directory)
# if requested by the user, we will also print a detailed log of the validation pass after full training: look at PrintValidationEvents in
# language_analysis.py (core directory)
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=callbacks
+ [
core.ConsoleLogger(print_train_loss=True, as_json=True),
core.PrintValidationEvents(n_epochs=opts.n_epochs),
],
)
else:
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=callbacks
+ [core.ConsoleLogger(print_train_loss=True, as_json=True)],
)
# and finally we train!
trainer.train(n_epochs=opts.n_epochs)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| 48.655488 | 236 | 0.672348 |
f75154c0f96287fe4623112f3de0cc0e61d931f4 | 4,357 | py | Python | experiments/murtaza/multiworld/camera_ready/pusher/offline_vae2.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/murtaza/multiworld/camera_ready/pusher/offline_vae2.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/murtaza/multiworld/camera_ready/pusher/offline_vae2.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v3
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.grill.launcher import grill_her_td3_full_experiment
if __name__ == "__main__":
variant = dict(
imsize=84,
init_camera=sawyer_pusher_camera_upright_v3,
grill_variant=dict(
save_video=True,
save_video_period=50,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=505,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=100,
discount=0.99,
num_updates_per_env_step=4,
collection_mode='online',
parallel_env_params=dict(
num_workers=1,
),
reward_scale=1,
),
her_kwargs=dict(),
td3_kwargs=dict(
tau=1e-2,
),
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_are_rollout_goals=0,
fraction_resampled_goals_are_env_goals=0.5,
),
algorithm='OFFLINE-VAE-HER-TD3',
normalize=False,
render=False,
exploration_noise=0.8,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
)
),
train_vae_variant=dict(
vae_path=None,
representation_size=16,
beta=.5,
num_epochs=2500,
dump_skew_debug_plots=False,
generate_vae_dataset_kwargs=dict(
test_p=.9,
N=5000,
oracle_dataset=True,
use_cached=False,
vae_dataset_specific_kwargs=dict(),
show=False,
# dataset_path='datasets/SawyerPushAndReachXYEnv-No-Arena-v1_N5000_sawyer_pusher_camera_upright_v3_imsize84_random_oracle_split_0.npy',
),
vae_kwargs=dict(
input_channels=3,
),
algo_kwargs=dict(
do_scatterplot=False,
use_linear_dynamics=False,
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
),
save_period=50,
),
)
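    # the sweeper below expands the Cartesian product of the lists in search_space (dotted keys
    # such as train_vae_variant.beta address nested entries of the default variant); each
    # resulting variant is then launched n_seeds times through run_experiment further down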
search_space = {
'train_vae_variant.beta':[.5, 2.5],
'env_id':['SawyerPushAndReacherXYEnv-v0'],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 1
mode = 'ec2'
exp_prefix = 'sawyer_pusher_offline_vae_final'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
# if variant['env_id'] == 'SawyerPushAndReachXYEnv-No-Arena-v0':
# variant['train_vae_variant']['generate_vae_dataset_kwargs']['dataset_path'] = \
# 'datasets/SawyerPushAndReachXYEnv-No-Arena-v0_N5000_sawyer_pusher_camera_upright_v3_imsize84_random_oracle_split_0.npy'
# else:
# variant['train_vae_variant']['generate_vae_dataset_kwargs']['dataset_path'] = \
# 'datasets/SawyerPushAndReachXYEnv-No-Arena-v1_N5000_sawyer_pusher_camera_upright_v3_imsize84_random_oracle_split_0.npy'
for _ in range(n_seeds):
run_experiment(
grill_her_td3_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
num_exps_per_instance=2,
)
| 35.713115 | 151 | 0.54992 |
f7515b5996d204f1739b35001a682bbd11b2cc6a | 940 | py | Python | src/ipcollector/setup.py | logdna/k8s-egress-networkpolicy-manager | 3305ed30cbd07d1b549b743d02fde53a596780ca | [
"MIT"
] | null | null | null | src/ipcollector/setup.py | logdna/k8s-egress-networkpolicy-manager | 3305ed30cbd07d1b549b743d02fde53a596780ca | [
"MIT"
] | null | null | null | src/ipcollector/setup.py | logdna/k8s-egress-networkpolicy-manager | 3305ed30cbd07d1b549b743d02fde53a596780ca | [
"MIT"
] | 1 | 2021-09-26T00:06:52.000Z | 2021-09-26T00:06:52.000Z | from ipcollector import __version__
from setuptools import setup, find_packages
from sys import path
from os import environ
path.insert(0, '.')
NAME = 'ipcollector'
if __name__ == '__main__':
with open(environ.get('REQUIREMENTS_TXT', 'requirements.txt')) as f:
requirements = f.read().splitlines()
setup(
name=NAME,
version=__version__,
author='Jonathan Kelley',
author_email='jonathan.kelley@logdna.com',
url='https://github.com/logdna/k8s-egress-networkpolicy-manager',
license='ASLv2',
packages=find_packages(),
package_dir={NAME: NAME},
description='ipcollector - Daemon that collects and forwards node/host IP telemetry to your ip-curator service. A component of k8s-egress-policy-manager',
install_requires=requirements,
entry_points={
'console_scripts': ['ipcollector = ipcollector.app:main'],
}
)
| 29.375 | 162 | 0.669149 |
f75166e1db0088a12e37baa7c00ade5398b544c7 | 1,071 | py | Python | merge_geo_with collocates.py | RSLancs/Extracting_plant_names_and_collocates_from_historical_texts | c8d6746978786ca4b83dc550114700530077c543 | [
"Apache-2.0"
] | null | null | null | merge_geo_with collocates.py | RSLancs/Extracting_plant_names_and_collocates_from_historical_texts | c8d6746978786ca4b83dc550114700530077c543 | [
"Apache-2.0"
] | null | null | null | merge_geo_with collocates.py | RSLancs/Extracting_plant_names_and_collocates_from_historical_texts | c8d6746978786ca4b83dc550114700530077c543 | [
"Apache-2.0"
] | null | null | null | ##python27
from pprint import pprint
import pandas as pd
##..............open manually merged geoparsed results
geo = pd.read_csv('./data/merger_xml_extracted_geoparsed_collocates.csv')
geo = [tuple(x) for x in geo.values] # df to list
print(geo[1])
##..........open collocate results....
collocate = pd.read_csv('./data/INDEXED_no_overlaps-abrev-dups_collocate_results_15.01.19.csv')
collocate = [tuple(x) for x in collocate.values] # df to list
print(collocate[1])
#............merge results........................
merged = []
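# nested-loop join: keep a combined row whenever the paragraph index (first column) of a
# geoparsed record matches the paragraph index of a collocate record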
for ig in geo:
for ic in collocate:
if ig[0] == ic[0]:
merged.append([ic[0],ic[2],ic[3],ic[4],ic[5],ic[6],ic[7],ig[0],ig[3],ig[5],ig[6]])
my_df = pd.DataFrame(merged) # transform result list to dataframe
my_df.columns = ['para_index',
'text',
'year',
'spc_acc',
'spc_syn',
'find_index',
'window',
'geo_para_index',
'standoff_loc_word',
'lat',
'lon' ] # add column labels
a = my_df.to_csv('./data/geo_locations_collocate_merger.csv') | 26.121951 | 96 | 0.606909 |
f7516d72cf57a5045a31b1bc07961010f578dc55 | 5,009 | py | Python | q2_longitudinal/_vega_specs/volatility/data.py | sterrettJD/q2-longitudinal | d9edd427c9668c8b95d708fc3c4d2722204c9ce0 | [
"BSD-3-Clause"
] | 10 | 2017-09-13T03:16:34.000Z | 2022-01-31T06:07:00.000Z | q2_longitudinal/_vega_specs/volatility/data.py | sterrettJD/q2-longitudinal | d9edd427c9668c8b95d708fc3c4d2722204c9ce0 | [
"BSD-3-Clause"
] | 114 | 2017-08-10T00:19:09.000Z | 2022-03-03T06:48:07.000Z | q2_longitudinal/_vega_specs/volatility/data.py | sterrettJD/q2-longitudinal | d9edd427c9668c8b95d708fc3c4d2722204c9ce0 | [
"BSD-3-Clause"
] | 16 | 2017-08-23T21:33:57.000Z | 2022-01-28T16:26:44.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2017-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from .const import (
DAT_INDIVIDUAL, FLD_GROUP_BY, FLD_METRIC, DAT_GLOBAL_VALS, FLD_CTRL_CL3,
DAT_SELECTED, FLD_CTRL_MEAN, FLD_CTRL_STDEV, SIG_METRIC, SIG_GROUP,
DAT_STATS, DAT_STATS_CUME_EXT, DAT_STATS_GLOB_EXT_LEFT, FLD_CTRL_CL2,
FLD_CTRL_EXT, DAT_AGG_BY, FLD_CTRL_CI0, FLD_CTRL_CI1, FLD_CTRL_COUNT,
FLD_MIN_X, FLD_MAX_X, FLD_MIN_Y, FLD_MAX_Y, FLD_CTRL_CL0, FLD_CTRL_CL1,
FLD_STATS_AVG_DEC, FLD_STATS_AVG_INC, FLD_STATS_MIN, FLD_STATS_MAX,
SIG_STATS_LEFT, DAT_STATS_GLOB_EXT_RIGHT, SIG_STATS_RIGHT)
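# The builders below return the "data" section of the volatility chart's Vega spec as plain
# Python structures: named datasets plus the aggregate/formula transforms Vega applies to them.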
def render_data_ctrl(control_chart_data, state):
return [
{'name': DAT_INDIVIDUAL,
'values': control_chart_data.to_dict('records'),
'transform': [
{'type': 'formula', 'as': FLD_GROUP_BY,
'expr': 'datum[%s]' % SIG_GROUP},
{'type': 'formula', 'as': FLD_METRIC,
'expr': 'datum[%s]' % SIG_METRIC}]},
{'name': DAT_GLOBAL_VALS,
'source': DAT_INDIVIDUAL,
'transform': [
{'type': 'aggregate',
'ops': ['mean', 'min', 'max', 'stdev', 'min', 'max'],
'fields': [FLD_METRIC, state, state, FLD_METRIC, FLD_METRIC,
FLD_METRIC],
'as': [FLD_CTRL_MEAN, FLD_MIN_X, FLD_MAX_X, FLD_CTRL_STDEV,
FLD_MIN_Y, FLD_MAX_Y]},
{'type': 'formula', 'as': FLD_CTRL_CL0,
'expr': 'datum.%s - (3 * datum.%s)' % (FLD_CTRL_MEAN,
FLD_CTRL_STDEV)},
{'type': 'formula', 'as': FLD_CTRL_CL1,
'expr': 'datum.%s - (2 * datum.%s)' % (FLD_CTRL_MEAN,
FLD_CTRL_STDEV)},
{'type': 'formula', 'as': FLD_CTRL_CL2,
'expr': 'datum.%s + (2 * datum.%s)' % (FLD_CTRL_MEAN,
FLD_CTRL_STDEV)},
{'type': 'formula', 'as': FLD_CTRL_CL3,
'expr': 'datum.%s + (3 * datum.%s)' % (FLD_CTRL_MEAN,
FLD_CTRL_STDEV)},
{'type': 'formula', 'as': FLD_CTRL_EXT,
'expr': '[datum.%s, datum.%s]' % (FLD_CTRL_CL0, FLD_CTRL_CL3)}]},
{'name': DAT_AGG_BY,
'source': DAT_INDIVIDUAL,
'transform': [
{'type': 'aggregate',
'groupby': [FLD_GROUP_BY, state],
# TODO: parameterize these intervals
# I don't see an easy way at the moment to define
# your own confidence interval in vega.
'ops': ['mean', 'ci0', 'ci1', 'count'],
'fields': [FLD_METRIC, FLD_METRIC, FLD_METRIC, FLD_METRIC],
'as': [FLD_CTRL_MEAN, FLD_CTRL_CI0, FLD_CTRL_CI1,
FLD_CTRL_COUNT]}]},
# These are just UI state vars to keep track of what has been clicked
# in the legend.
{'name': DAT_SELECTED,
'on': [
{'trigger': 'clear', 'remove': True},
{'trigger': '!shift', 'remove': True},
{'trigger': '!shift && clicked', 'insert': 'clicked'},
{'trigger': 'shift && clicked', 'toggle': 'clicked'}]}]
def render_data_stats(stats_chart_data):
return [
{'name': DAT_STATS,
'values': stats_chart_data.to_dict('records'),
'format': {'parse': {'importance': 'number'}}
},
# This gets used to set the initial values for the x-axis extent
# when the selected stat is the cumulative average stats.
{'name': DAT_STATS_CUME_EXT,
'source': DAT_STATS,
'transform': [
{'type': 'aggregate',
'ops': ['min', 'max'],
'fields': [FLD_STATS_AVG_DEC, FLD_STATS_AVG_INC],
'as': [FLD_STATS_MIN, FLD_STATS_MAX]},
]},
{'name': DAT_STATS_GLOB_EXT_LEFT,
'source': DAT_STATS,
'transform': [
{'type': 'aggregate',
'ops': ['min', 'max'],
'fields': [{'signal': SIG_STATS_LEFT},
{'signal': SIG_STATS_LEFT}],
# These fields will be `undefined` for cumulative avg stats
'as': [FLD_STATS_MIN, FLD_STATS_MAX]}]},
{'name': DAT_STATS_GLOB_EXT_RIGHT,
'source': DAT_STATS,
'transform': [
{'type': 'aggregate',
'ops': ['min', 'max'],
'fields': [{'signal': SIG_STATS_RIGHT},
{'signal': SIG_STATS_RIGHT}],
# These fields will be `undefined` for cumulative avg stats
'as': [FLD_STATS_MIN, FLD_STATS_MAX]}]}]
| 46.813084 | 79 | 0.508884 |
f75176940202e76f8338ed5fd86bc72ce1de620e | 28,826 | py | Python | retrobiocat_web/retro/rdchiral/main.py | ihayhurst/RetroBioCat | d674897459c0ab65faad5ed3017c55cf51bcc020 | [
"MIT"
] | 9 | 2020-12-01T16:33:02.000Z | 2022-01-19T20:02:42.000Z | retrobiocat_web/retro/rdchiral/main.py | ihayhurst/RetroBioCat | d674897459c0ab65faad5ed3017c55cf51bcc020 | [
"MIT"
] | 4 | 2020-10-02T14:38:32.000Z | 2021-08-02T09:23:58.000Z | retrobiocat_web/retro/rdchiral/main.py | ihayhurst/RetroBioCat | d674897459c0ab65faad5ed3017c55cf51bcc020 | [
"MIT"
] | 6 | 2021-01-14T07:48:36.000Z | 2022-03-20T17:34:27.000Z | from __future__ import print_function
import sys
import os
import re
import copy
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
from rdkit.Chem.rdchem import ChiralType, BondType, BondDir
from retrobiocat_web.retro.rdchiral.utils import vprint, PLEVEL, atoms_are_different
from retrobiocat_web.retro.rdchiral.initialization import rdchiralReaction, rdchiralReactants
from retrobiocat_web.retro.rdchiral.chiral import template_atom_could_have_been_tetra, copy_chirality,\
atom_chirality_matches
from retrobiocat_web.retro.rdchiral.clean import canonicalize_outcome_smiles, combine_enantiomers_into_racemic
from retrobiocat_web.retro.rdchiral.bonds import BondDirOpposite, restore_bond_stereo_to_sp2_atom
'''
This file contains the main functions for running reactions.
An incomplete description of expected behavior is as follows:
(1) RDKit's native RunReactants is called on an achiral version of the molecule,
which has had all tetrahedral centers and bond directions stripped.
(2) For each outcome, we examine the correspondence between atoms in the
reactants and atoms in the reactant template for reasons to exclude the
current outcome. The way we do so is through the react_atom_idx property in
the generated products. This is one of the
few properties always copied over to the products here:
https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/ChemReactions/ReactionRunner.cpp
A previous version of this code did so through the Isotope label of each atom,
before the react_atom_idx was added to the ReactionRunner.cpp code.
The following conditions are checked:
TETRAHEDRAL ATOMS
(a) If a reactant atom is a tetrahedral center with specified chirality
and the reactant template atom is NOT chiral but is defined in a way
that it could have been specified, reject this outcome
(b) If a reactant atom is a tetrahedral center with specified chirality
and the reactant template atom is NOT chiral and is not defined in
a way where it could have been (i.e., is generalized without spec.
neighbors), then keep the match.
(c) If a reactant atom is achiral but the reactant tempalte atom is chiral,
the match is still allowed to happen. We might want to change this later
or let it be an option.
(d) If a reactant atom is a tetrahedral center with specified chirality
and the reactant template also has its chirality specified, let the
match happen if the chirality matches.
DOUBLE BONDS
(a) If a reactant double bond is defined with directionality specified and
the reactant template is unspecified but COULD have been (i.e.,
neighbors of sp2 carbons are specified), reject this outcome
(b) If a reactant double bond is defined with directionality specified and
        the reactant template is unspecified but could NOT have been (in the
case of generalization), allow the match to occur. This is what we
default to when half the bond is specified, like in "C=C/O"
note: reactants are checked for implicit bond stereo based on rings
(c) If a reactant double bond has implicit cis due to ring membership, it is
still allowed to match an unspecified template double bond. Might lead
to some weird edge cases, but mostly makes sense.
(3) For each outcome, merge all products into a single molecule. During this
process, we check for bonds that are missing in the product. These are those
that were present in the reactants but were NOT matched in the reactant
template.
(4) For each outcome, examine product atoms to correct tetrahedral chirality.
(5) For each outcome, examine product double bonds to correct cis/trans-ness
'''
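# Minimal usage sketch (mirrors the __main__ block at the bottom of this file; the SMARTS and
# SMILES strings are only illustrative):
#   rxn = rdchiralReaction('[C:1][OH:2]>>[C:1][O:2][C]')
#   reactants = rdchiralReactants('OCC(=O)OCCCO')
#   outcomes = rdchiralRun(rxn, reactants)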
def rdchiralRunText(reaction_smarts, reactant_smiles, **kwargs):
'''Run from SMARTS string and SMILES string. This is NOT recommended
for library application, since initialization is pretty slow. You should
separately initialize the template and molecules and call run()
Args:
reaction_smarts (str): Reaction SMARTS string
reactant_smiles (str): Reactant SMILES string
**kwargs: passed through to `rdchiralRun`
Returns:
list: List of outcomes from `rdchiralRun`
'''
rxn = rdchiralReaction(reaction_smarts)
reactants = rdchiralReactants(reactant_smiles)
return rdchiralRun(rxn, reactants, **kwargs)
def rdchiralRun(rxn, reactants, keep_mapnums=False, combine_enantiomers=True, return_mapped=False):
'''Run rdchiral reaction
NOTE: there is a fair amount of initialization (assigning stereochem), most
importantly assigning atom map numbers to the reactant atoms. It is
HIGHLY recommended to use the custom classes for initialization.
Args:
        rxn (rdchiralReaction): (rdkit reaction + auxiliary information)
        reactants (rdchiralReactants): (rdkit mol + auxiliary information)
keep_mapnums (bool): Whether to keep map numbers or not
combine_enantiomers (bool): Whether to combine enantiomers
return_mapped (bool): Whether to additionally return atom mapped SMILES strings
Returns:
(list, str (optional)): Returns list of outcomes. If `return_mapped` is True,
additionally return atom mapped SMILES strings
'''
# New: reset atom map numbers for templates in case they have been overwritten
# by previous uses of this template!
rxn.reset()
###############################################################################
# Run naive RDKit on ACHIRAL version of molecules
outcomes = rxn.rxn.RunReactants((reactants.reactants_achiral,))
if PLEVEL >= (1): print('Using naive RunReactants, {} outcomes'.format(len(outcomes)))
if not outcomes:
return []
###############################################################################
###############################################################################
# Initialize, now that there is at least one outcome
final_outcomes = set()
mapped_outcomes = {}
# We need to keep track of what map numbers correspond to which atoms
# note: all reactant atoms must be mapped, so this is safe
atoms_r = reactants.atoms_r
# Copy reaction template so we can play around with map numbers
template_r, template_p = rxn.template_r, rxn.template_p
    # Get molAtomMapNum->atom dictionary for template reactants and products
atoms_rt_map = rxn.atoms_rt_map
atoms_pt_map = rxn.atoms_pt_map
###############################################################################
for outcome in outcomes:
###############################################################################
# Look for new atoms in products that were not in
# reactants (e.g., LGs for a retro reaction)
if PLEVEL >= (2): print('Processing {}'.format(str([Chem.MolToSmiles(x, True) for x in outcome])))
unmapped = 900
for m in outcome:
for a in m.GetAtoms():
# Assign map number to outcome based on react_atom_idx
if a.HasProp('react_atom_idx'):
a.SetAtomMapNum(reactants.idx_to_mapnum(int(a.GetProp('react_atom_idx'))))
if not a.GetAtomMapNum():
a.SetAtomMapNum(unmapped)
unmapped += 1
if PLEVEL >= 2: print('Added {} map numbers to product'.format(unmapped-900))
###############################################################################
###############################################################################
# Check to see if reactants should not have been matched (based on chirality)
# Define map num -> reactant template atom map
atoms_rt = {a.GetAtomMapNum(): atoms_rt_map[a.GetIntProp('old_mapno')] \
for m in outcome for a in m.GetAtoms() if a.HasProp('old_mapno')}
# Set map numbers of reactant template to be consistent with reactant/product molecules
# note: this is okay to do within the loop, because ALL atoms must be matched
# in the templates, so the atommapnum will get overwritten every time
[a.SetAtomMapNum(i) for (i, a) in atoms_rt.items()]
# Make sure each atom matches
# note: this is a little weird because atom_chirality_matches takes three values,
# -1 (both tetra but opposite), 0 (not a match), and +1 (both tetra and match)
# and we only want to continue if they all equal -1 or all equal +1
prev = None
skip_outcome = False
for match in (atom_chirality_matches(atoms_rt[i], atoms_r[i]) for i in atoms_rt):
if match == 0:
if PLEVEL >= 2: print('Chirality violated! Should not have gotten this match')
skip_outcome = True
break
elif match == 2: # ambiguous case
continue
elif prev is None:
prev = match
elif match != prev:
if PLEVEL >= 2: print('Part of the template matched reactant chirality, part is inverted! Should not match')
skip_outcome = True
break
if skip_outcome:
continue
if PLEVEL >= 2: print('Chirality matches! Just checked with atom_chirality_matches')
# Check bond chirality - iterate through reactant double bonds where
# chirality is specified (or not). atoms defined by map number
skip_outcome = False
for atoms, dirs, is_implicit in reactants.atoms_across_double_bonds:
if all(i in atoms_rt for i in atoms):
                # All atoms defining chirality were matched to the reactant template
# So, check if it is consistent with how the template is defined
#...but /=/ should match \=\ since they are both trans...
matched_atom_map_nums = tuple(atoms_rt[i].GetAtomMapNum() for i in atoms)
# Convert atoms_rt to original template's atom map numbers:
matched_atom_map_nums = tuple(rxn.atoms_rt_idx_to_map[atoms_rt[i].GetIdx()] for i in atoms)
if matched_atom_map_nums not in rxn.required_rt_bond_defs:
continue # this can happen in ring openings, for example
dirs_template = rxn.required_rt_bond_defs[matched_atom_map_nums]
if dirs != dirs_template and \
(BondDirOpposite[dirs[0]], BondDirOpposite[dirs[1]]) != dirs_template and \
not (dirs_template == (BondDir.NONE, BondDir.NONE) and is_implicit):
if PLEVEL >= 5: print('Reactant bond chirality does not match template!')
if PLEVEL >= 5: print('Based on map numbers...')
if PLEVEL >= 5: print(' rct: {} -> {}'.format(matched_atom_map_nums, dirs))
if PLEVEL >= 5: print(' tmp: {} -> {}'.format(matched_atom_map_nums, dirs_template))
if PLEVEL >= 5: print('skipping this outcome, should not have matched...')
skip_outcome = True
break
if skip_outcome:
continue
###############################################################################
###############################################################################
# Convert product(s) to single product so that all
# reactions can be treated as pseudo-intramolecular
# But! check for ring openings mistakenly split into multiple
# This can be diagnosed by duplicate map numbers (i.e., SMILES)
mapnums = [a.GetAtomMapNum() for m in outcome for a in m.GetAtoms() if a.GetAtomMapNum()]
if len(mapnums) != len(set(mapnums)): # duplicate?
if PLEVEL >= 1: print('Found duplicate mapnums in product - need to stitch')
# need to do a fancy merge
merged_mol = Chem.RWMol(outcome[0])
merged_map_to_id = {a.GetAtomMapNum(): a.GetIdx() for a in outcome[0].GetAtoms() if a.GetAtomMapNum()}
for j in range(1, len(outcome)):
new_mol = outcome[j]
for a in new_mol.GetAtoms():
if a.GetAtomMapNum() not in merged_map_to_id:
merged_map_to_id[a.GetAtomMapNum()] = merged_mol.AddAtom(a)
for b in new_mol.GetBonds():
bi = b.GetBeginAtom().GetAtomMapNum()
bj = b.GetEndAtom().GetAtomMapNum()
if PLEVEL >= 10: print('stitching bond between {} and {} in stich has chirality {}, {}'.format(
bi, bj, b.GetStereo(), b.GetBondDir()
))
if not merged_mol.GetBondBetweenAtoms(
merged_map_to_id[bi], merged_map_to_id[bj]):
merged_mol.AddBond(merged_map_to_id[bi],
merged_map_to_id[bj], b.GetBondType())
merged_mol.GetBondBetweenAtoms(
merged_map_to_id[bi], merged_map_to_id[bj]
).SetStereo(b.GetStereo())
merged_mol.GetBondBetweenAtoms(
merged_map_to_id[bi], merged_map_to_id[bj]
).SetBondDir(b.GetBondDir())
outcome = merged_mol.GetMol()
if PLEVEL >= 1: print('Merged editable mol, converted back to real mol, {}'.format(Chem.MolToSmiles(outcome, True)))
else:
new_outcome = outcome[0]
for j in range(1, len(outcome)):
new_outcome = AllChem.CombineMols(new_outcome, outcome[j])
outcome = new_outcome
if PLEVEL >= 2: print('Converted all outcomes to single molecules')
###############################################################################
###############################################################################
# Figure out which atoms were matched in the templates
# atoms_rt and atoms_p will be outcome-specific.
atoms_pt = {a.GetAtomMapNum(): atoms_pt_map[a.GetIntProp('old_mapno')] \
for a in outcome.GetAtoms() if a.HasProp('old_mapno')}
atoms_p = {a.GetAtomMapNum(): a for a in outcome.GetAtoms() if a.GetAtomMapNum()}
# Set map numbers of product template
# note: this is okay to do within the loop, because ALL atoms must be matched
# in the templates, so the map numbers will get overwritten every time
# This makes it easier to check parity changes
[a.SetAtomMapNum(i) for (i, a) in atoms_pt.items()]
###############################################################################
###############################################################################
# Check for missing bonds. These are bonds that are present in the reactants,
# not specified in the reactant template, and not in the product. Accidental
# fragmentation can occur for intramolecular ring openings
missing_bonds = []
for (i, j, b) in reactants.bonds_by_mapnum:
if i in atoms_p and j in atoms_p:
# atoms from reactant bond show up in product
if not outcome.GetBondBetweenAtoms(atoms_p[i].GetIdx(), atoms_p[j].GetIdx()):
#...but there is not a bond in the product between those atoms
if i not in atoms_rt or j not in atoms_rt or not template_r.GetBondBetweenAtoms(atoms_rt[i].GetIdx(), atoms_rt[j].GetIdx()):
# the reactant template did not specify a bond between those atoms (e.g., intentionally destroy)
missing_bonds.append((i, j, b))
if missing_bonds:
if PLEVEL >= 1: print('Product is missing non-reacted bonds that were present in reactants!')
outcome = Chem.RWMol(outcome)
rwmol_map_to_id = {a.GetAtomMapNum(): a.GetIdx() for a in outcome.GetAtoms() if a.GetAtomMapNum()}
for (i, j, b) in missing_bonds:
outcome.AddBond(rwmol_map_to_id[i], rwmol_map_to_id[j])
new_b = outcome.GetBondBetweenAtoms(rwmol_map_to_id[i], rwmol_map_to_id[j])
new_b.SetBondType(b.GetBondType())
new_b.SetBondDir(b.GetBondDir())
new_b.SetIsAromatic(b.GetIsAromatic())
outcome = outcome.GetMol()
atoms_p = {a.GetAtomMapNum(): a for a in outcome.GetAtoms() if a.GetAtomMapNum()}
else:
if PLEVEL >= 3: print('No missing bonds')
###############################################################################
# Now that we've fixed any bonds, connectivity is set. This is a good time
        # to update the property cache, since all that is left is fixing atom/bond
# stereochemistry.
try:
Chem.SanitizeMol(outcome)
outcome.UpdatePropertyCache()
except ValueError as e:
if PLEVEL >= 1: print('{}, {}'.format(Chem.MolToSmiles(outcome, True), e))
continue
###############################################################################
# Correct tetra chirality in the outcome
tetra_copied_from_reactants = []
for a in outcome.GetAtoms():
# Participants in reaction core (from reactants) will have old_mapno
# Spectators present in reactants will have react_atom_idx
# ...so new atoms will have neither!
if not a.HasProp('old_mapno'):
# Not part of the reactants template
if not a.HasProp('react_atom_idx'):
# Atoms only appear in product template - their chirality
# should be properly instantiated by RDKit...hopefully...
if PLEVEL >= 4: print('Atom {} created by product template, should have right chirality'.format(a.GetAtomMapNum()))
else:
if PLEVEL >= 4: print('Atom {} outside of template, copy chirality from reactants'.format(a.GetAtomMapNum()))
copy_chirality(atoms_r[a.GetAtomMapNum()], a)
if a.GetChiralTag() != ChiralType.CHI_UNSPECIFIED:
tetra_copied_from_reactants.append(a)
else:
# Part of reactants and reaction core
if template_atom_could_have_been_tetra(atoms_rt[a.GetAtomMapNum()]):
if PLEVEL >= 3: print('Atom {} was in rct template (could have been tetra)'.format(a.GetAtomMapNum()))
if template_atom_could_have_been_tetra(atoms_pt[a.GetAtomMapNum()]):
if PLEVEL >= 3: print('Atom {} in product template could have been tetra, too'.format(a.GetAtomMapNum()))
# Was the product template specified?
if atoms_pt[a.GetAtomMapNum()].GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
# No, leave unspecified in product
if PLEVEL >= 3: print('...but it is not specified in product, so destroy chirality')
a.SetChiralTag(ChiralType.CHI_UNSPECIFIED)
else:
# Yes
if PLEVEL >= 3: print('...and product is specified')
# Was the reactant template specified?
if atoms_rt[a.GetAtomMapNum()].GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
# No, so the reaction introduced chirality
if PLEVEL >= 3: print('...but reactant template was not, so copy from product template')
copy_chirality(atoms_pt[a.GetAtomMapNum()], a)
else:
# Yes, so we need to check if chirality should be preserved or inverted
if PLEVEL >= 3: print('...and reactant template was, too! copy from reactants')
copy_chirality(atoms_r[a.GetAtomMapNum()], a)
if atom_chirality_matches(atoms_pt[a.GetAtomMapNum()], atoms_rt[a.GetAtomMapNum()]) == -1:
if PLEVEL >= 3: print('but! reactant template and product template have opposite stereochem, so invert')
a.InvertChirality()
else:
# Reactant template chiral, product template not - the
# reaction is supposed to destroy chirality, so leave
# unspecified
if PLEVEL >= 3: print('If reactant template could have been ' +
'chiral, but the product template could not, then we dont need ' +
'to worry about specifying product atom chirality')
else:
if PLEVEL >= 3: print('Atom {} could not have been chiral in reactant template'.format(a.GetAtomMapNum()))
if not template_atom_could_have_been_tetra(atoms_pt[a.GetAtomMapNum()]):
                        if PLEVEL >= 3: print('Atom {} also could not have been chiral in product template'.format(a.GetAtomMapNum()))
if PLEVEL >= 3: print('...so, copy chirality from reactant instead')
copy_chirality(atoms_r[a.GetAtomMapNum()], a)
if a.GetChiralTag() != ChiralType.CHI_UNSPECIFIED:
tetra_copied_from_reactants.append(a)
else:
                        if PLEVEL >= 3: print('Atom {} could/does have product template chirality!'.format(a.GetAtomMapNum()))
if PLEVEL >= 3: print('...so, copy chirality from product template')
copy_chirality(atoms_pt[a.GetAtomMapNum()], a)
if PLEVEL >= 3: print('New chiral tag {}'.format(a.GetChiralTag()))
if skip_outcome:
if PLEVEL >= 2: print('Skipping this outcome - chirality broken?')
continue
if PLEVEL >= 2: print('After attempting to re-introduce chirality, outcome = {}'.format(Chem.MolToSmiles(outcome, True)))
###############################################################################
###############################################################################
# Correct bond directionality in the outcome
for b in outcome.GetBonds():
if b.GetBondType() != BondType.DOUBLE:
continue
# Ring double bonds do not need to be touched(?)
if b.IsInRing():
continue
ba = b.GetBeginAtom()
bb = b.GetEndAtom()
# Is it possible at all to specify this bond?
if ba.GetDegree() == 1 or bb.GetDegree() == 1:
continue
if PLEVEL >= 5: print('Looking at outcome bond {}={}'.format(ba.GetAtomMapNum(), bb.GetAtomMapNum()))
if ba.HasProp('old_mapno') and bb.HasProp('old_mapno'):
# Need to rely on templates for bond chirality, both atoms were
# in the reactant template
if PLEVEL >= 5: print('Both atoms in this double bond were in the reactant template')
if (ba.GetIntProp('old_mapno'), bb.GetIntProp('old_mapno')) in \
rxn.required_bond_defs_coreatoms:
if PLEVEL >= 5: print('and reactant template *could* have specified the chirality!')
                    if PLEVEL >= 5: print('..product should be properly instantiated')
continue
if PLEVEL >= 5: print('But it was impossible to have specified chirality (e.g., aux C=C for context)')
elif not ba.HasProp('react_atom_idx') and not bb.HasProp('react_atom_idx'):
# The atoms were both created by the product template, so any bond
# stereochemistry should have been instantiated by the product template
# already...hopefully...otherwise it isn't specific enough?
continue
# Need to copy from reactants, this double bond was simply carried over,
            # *although* one of the atoms could have reacted and been an auxiliary
# atom in the reaction, e.g., C/C=C(/CO)>>C/C=C(/C[Br])
if PLEVEL >= 5: print('Restoring cis/trans character of bond {}={} from reactants'.format(
ba.GetAtomMapNum(), bb.GetAtomMapNum()))
# Start with setting the BeginAtom
begin_atom_specified = restore_bond_stereo_to_sp2_atom(ba, reactants.bond_dirs_by_mapnum)
if not begin_atom_specified:
# don't bother setting other side of bond, since we won't be able to
# fully specify this bond as cis/trans
continue
# Look at other side of the bond now, the EndAtom
end_atom_specified = restore_bond_stereo_to_sp2_atom(bb, reactants.bond_dirs_by_mapnum)
if not end_atom_specified:
# note: this can happen if C=C/C-N turns into C=C/C=N
if PLEVEL >= 1:
print(reactants.bond_dirs_by_mapnum)
print(ba.GetAtomMapNum())
print(bb.GetAtomMapNum())
print(Chem.MolToSmiles(reactants.reactants, True))
print(Chem.MolToSmiles(outcome, True))
print('Uh oh, looks like bond direction is only specified for half of this bond?')
###############################################################################
#Keep track of the reacting atoms for later use in grouping
atoms_diff = {x:atoms_are_different(atoms_r[x],atoms_p[x]) for x in atoms_rt}
#make tuple of changed atoms
atoms_changed = tuple([x for x in atoms_diff.keys() if atoms_diff[x] == True])
mapped_outcome = Chem.MolToSmiles(outcome, True)
if not keep_mapnums:
for a in outcome.GetAtoms():
a.SetAtomMapNum(0)
# Now, check to see if we have destroyed chirality
# this occurs when chirality was not actually possible (e.g., due to
# symmetry) but we had assigned a tetrahedral center originating
# from the reactants.
# ex: SMILES C(=O)1C[C@H](Cl)CCC1
# SMARTS [C:1]-[C;H0;D3;+0:2](-[C:3])=[O;H0;D1;+0]>>[C:1]-[CH2;D2;+0:2]-[C:3]
"""
skip_outcome = False
if len(tetra_copied_from_reactants) > 0:
Chem.AssignStereochemistry(outcome, cleanIt=True, force=True)
for a in tetra_copied_from_reactants:
if a.GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
if PLEVEL >= 2: print('Auxiliary reactant atom was chiral, now is broken -> skip outcome')
skip_outcome = True
break
if skip_outcome:
continue
"""
smiles = Chem.MolToSmiles(outcome, True)
smiles_new = canonicalize_outcome_smiles(smiles)
if smiles_new is None:
continue
final_outcomes.add(smiles_new)
mapped_outcomes[smiles_new] = (mapped_outcome, atoms_changed)
###############################################################################
# One last fix for consolidating multiple stereospecified products...
if combine_enantiomers:
final_outcomes = combine_enantiomers_into_racemic(final_outcomes)
###############################################################################
if return_mapped:
return list(final_outcomes), mapped_outcomes
else:
return list(final_outcomes)
if __name__ == '__main__':
# Directly use SMILES/SMARTS
reaction_smarts = '[C:1][OH:2]>>[C:1][O:2][C]'
reactant_smiles = 'OCC(=O)OCCCO'
outcomes = rdchiralRunText(reaction_smarts, reactant_smiles)
print(outcomes)
# Pre-initialize
rxn = rdchiralReaction(reaction_smarts)
reactants = rdchiralReactants(reactant_smiles)
outcomes = rdchiralRun(rxn, reactants)
print(outcomes)
# Get list of atoms that changed as well
outcomes, mapped_outcomes = rdchiralRun(rxn, reactants, return_mapped=True)
print(outcomes, mapped_outcomes)
| 52.410909 | 144 | 0.575869 |
f7517ebb49513bbccd04a3711951b9223ce8a1b7 | 30,787 | py | Python | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-01-24T08:54:57.000Z | 2022-01-24T08:54:57.000Z | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
import json
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils import recorded_by_proxy
from azure.ai.textanalytics import (
TextAnalyticsClient,
TextDocumentInput,
VERSION,
TextAnalyticsApiVersion,
PiiEntityDomain,
PiiEntityCategory
)
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
# the first one
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestRecognizePIIEntities(TextAnalyticsTest):
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_no_single_input(self, client):
with pytest.raises(TypeError):
response = client.recognize_pii_entities("hello world")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "text": "My SSN is 859-98-0987."},
{"id": "2", "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."},
{"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number?"}]
response = client.recognize_pii_entities(docs, show_stats=True)
assert response[0].entities[0].text == "859-98-0987"
assert response[0].entities[0].category == "USSocialSecurityNumber"
assert response[1].entities[0].text == "111000025"
# assert response[1].entities[0].category == "ABA Routing Number" # Service is currently returning PhoneNumber here
# commenting out brazil cpf, currently service is not returning it
# assert response[2].entities[0].text == "998.214.865-68"
# assert response[2].entities[0].category == "Brazil CPF Number"
for doc in response:
assert doc.id is not None
assert doc.statistics is not None
for entity in doc.entities:
assert entity.text is not None
assert entity.category is not None
assert entity.offset is not None
assert entity.confidence_score is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_text_document_input(self, client):
docs = [
TextDocumentInput(id="1", text="My SSN is 859-98-0987."),
TextDocumentInput(id="2", text="Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."),
TextDocumentInput(id="3", text="Is 998.214.865-68 your Brazilian CPF number?")
]
response = client.recognize_pii_entities(docs, show_stats=True)
assert response[0].entities[0].text == "859-98-0987"
assert response[0].entities[0].category == "USSocialSecurityNumber"
assert response[1].entities[0].text == "111000025"
# assert response[1].entities[0].category == "ABA Routing Number" # Service is currently returning PhoneNumber here
# commenting out brazil cpf, currently service is not returning it
# assert response[2].entities[0].text == "998.214.865-68"
# assert response[2].entities[0].category == "Brazil CPF Number"
for doc in response:
assert doc.id is not None
assert doc.statistics is not None
for entity in doc.entities:
assert entity.text is not None
assert entity.category is not None
assert entity.offset is not None
assert entity.confidence_score is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_passing_only_string(self, client):
docs = [
"My SSN is 859-98-0987.",
"Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
"Is 998.214.865-68 your Brazilian CPF number?",
""
]
response = client.recognize_pii_entities(docs, show_stats=True)
assert response[0].entities[0].text == "859-98-0987"
assert response[0].entities[0].category == "USSocialSecurityNumber"
assert response[1].entities[0].text == "111000025"
# assert response[1].entities[0].category == "ABA Routing Number" # Service is currently returning PhoneNumber here
# commenting out brazil cpf, currently service is not returning it
# assert response[2].entities[0].text == "998.214.865-68"
# assert response[2].entities[0].category == "Brazil CPF Number"
assert response[3].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_input_with_some_errors(self, client):
docs = [{"id": "1", "language": "notalanguage", "text": "hola"},
{"id": "2", "text": ""},
{"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number?"}]
response = client.recognize_pii_entities(docs)
assert response[0].is_error
assert response[1].is_error
# assert not response[2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_input_with_all_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "Spanish", "text": "Hola"},
{"id": "3", "language": "de", "text": ""}]
response = client.recognize_pii_entities(docs)
assert response[0].is_error
assert response[1].is_error
assert response[2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_too_many_documents(self, client):
docs = ["One", "Two", "Three", "Four", "Five", "Six"]
with pytest.raises(HttpResponseError) as excinfo:
client.recognize_pii_entities(docs)
assert excinfo.value.status_code == 400
assert excinfo.value.error.code == "InvalidDocumentBatch"
assert "Batch request contains too many records" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_output_same_order_as_input(self, client):
docs = [
TextDocumentInput(id="1", text="one"),
TextDocumentInput(id="2", text="two"),
TextDocumentInput(id="3", text="three"),
TextDocumentInput(id="4", text="four"),
TextDocumentInput(id="5", text="five")
]
response = client.recognize_pii_entities(docs)
for idx, doc in enumerate(response):
assert str(idx + 1) == doc.id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": ""})
@recorded_by_proxy
def test_empty_credential_class(self, client):
with pytest.raises(ClientAuthenticationError):
response = client.recognize_pii_entities(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": "xxxxxxxxxxxx"})
@recorded_by_proxy
def test_bad_credentials(self, client):
with pytest.raises(ClientAuthenticationError):
response = client.recognize_pii_entities(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_document_input(self, client):
docs = "This is the wrong type"
with pytest.raises(TypeError):
response = client.recognize_pii_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
TextDocumentInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
"You cannot mix string input with the above inputs"
]
with pytest.raises(TypeError):
response = client.recognize_pii_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.recognize_pii_entities(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
assert resp.id == in_order[idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_show_stats_and_model_version(self, client):
def callback(response):
assert response is not None
assert response.model_version
assert response.raw_response is not None
assert response.statistics.document_count == 5
assert response.statistics.transaction_count == 4
assert response.statistics.valid_document_count == 4
assert response.statistics.erroneous_document_count == 1
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.recognize_pii_entities(
docs,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_batch_size_over_limit(self, client):
docs = ["hello world"] * 1050
with pytest.raises(HttpResponseError):
response = client.recognize_pii_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"fr\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = client.recognize_pii_entities(docs, language="fr", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = client.recognize_pii_entities(docs, language="", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_per_item_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [{"id": "1", "language": "", "text": "I will go to the park."},
{"id": "2", "language": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.recognize_pii_entities(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_language_hint_and_obj_input(self, client):
def callback(resp):
language_str = "\"language\": \"de\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian."),
TextDocumentInput(id="4", text="Este es un document escrito en Español."),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = client.recognize_pii_entities(docs, language="de", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = client.recognize_pii_entities(docs, language="en", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_language_hint_and_dict_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [{"id": "1", "language": "es", "text": "I will go to the park."},
{"id": "2", "language": "es", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.recognize_pii_entities(docs, language="en", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"default_language": "es"})
@recorded_by_proxy
def test_client_passed_default_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 3
def callback_2(resp):
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.recognize_pii_entities(docs, raw_response_hook=callback)
response = client.recognize_pii_entities(docs, language="en", raw_response_hook=callback_2)
response = client.recognize_pii_entities(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_invalid_language_hint_method(self, client):
response = client.recognize_pii_entities(
["This should fail because we're passing in an invalid language hint"], language="notalanguage"
)
assert response[0].error.code == 'UnsupportedLanguageCode'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_invalid_language_hint_docs(self, client):
response = client.recognize_pii_entities(
[{"id": "1", "language": "notalanguage", "text": "This should fail because we're passing in an invalid language hint"}]
)
assert response[0].error.code == 'UnsupportedLanguageCode'
@TextAnalyticsPreparer()
@recorded_by_proxy
def test_rotate_subscription_key(self, textanalytics_test_endpoint, textanalytics_test_api_key):
credential = AzureKeyCredential(textanalytics_test_api_key)
client = TextAnalyticsClient(textanalytics_test_endpoint, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.recognize_pii_entities(docs)
assert response is not None
credential.update("xxx") # Make authentication fail
with pytest.raises(ClientAuthenticationError):
response = client.recognize_pii_entities(docs)
credential.update(textanalytics_test_api_key) # Authenticate successfully again
response = client.recognize_pii_entities(docs)
assert response is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_user_agent(self, client):
def callback(resp):
assert "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()) in \
resp.http_request.headers["User-Agent"]
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.recognize_pii_entities(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_attribute_error_no_result_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = client.recognize_pii_entities(docs)
# Attributes on DocumentError
assert response[0].is_error
assert response[0].id == "1"
assert response[0].error is not None
# Result attribute not on DocumentError, custom error message
try:
entities = response[0].entities
except AttributeError as custom_error:
assert custom_error.args[0] == \
'\'DocumentError\' object has no attribute \'entities\'. ' \
'The service was unable to process this document:\nDocument Id: 1\nError: ' \
'InvalidDocument - Document text is empty.\n'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_attribute_error_nonexistent_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = client.recognize_pii_entities(docs)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
entities = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
assert default_behavior.args[0] == '\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_model_version_error(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
try:
result = client.recognize_pii_entities(docs, model_version="bad")
except HttpResponseError as err:
assert err.error.code == "ModelVersionIncorrect"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": text}]
doc_errors = client.recognize_pii_entities(docs)
assert doc_errors[0].error.code == "InvalidDocument"
assert doc_errors[0].error.message is not None
assert doc_errors[1].error.code == "UnsupportedLanguageCode"
assert doc_errors[1].error.message is not None
assert doc_errors[2].error.code == "InvalidDocument"
assert doc_errors[2].error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_warnings(self, client):
        # No warnings are actually returned for recognize_pii_entities yet. Update this test once the service starts returning them.
docs = [
{"id": "1", "text": "This won't actually create a warning :'("},
]
result = client.recognize_pii_entities(docs)
for doc in result:
doc_warnings = doc.warnings
assert len(doc_warnings) == 0
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_not_passing_list_for_docs(self, client):
docs = {"id": "1", "text": "hello world"}
with pytest.raises(TypeError) as excinfo:
client.recognize_pii_entities(docs)
assert "Input documents cannot be a dict" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
client.recognize_pii_entities(docs)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
client.recognize_pii_entities(None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_duplicate_ids_error(self, client):
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed at."}]
try:
result = client.recognize_pii_entities(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocument"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_batch_size_over_limit_error(self, client):
# Batch size over limit
docs = ["hello world"] * 1001
try:
response = client.recognize_pii_entities(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocumentBatch"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = client.recognize_pii_entities(
documents=["Test passing cls to endpoint"],
cls=callback
)
assert res == "cls result"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_language_kwarg_english(self, client):
def callback(response):
language_str = "\"language\": \"en\""
assert response.http_request.body.count(language_str) == 1
assert response.model_version is not None
assert response.statistics is not None
res = client.recognize_pii_entities(
documents=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="en",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_redacted_text(self, client):
result = client.recognize_pii_entities(["My SSN is 859-98-0987."])
assert "My SSN is ***********." == result[0].redacted_text
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_phi_domain_filter(self, client):
        # Without the domain filter this returns two entities: Microsoft as an organization
        # and the phone number. The PHI domain filter currently still returns both entities
        # (see test_categories_filter_with_domain_filter below), so two entities are expected here.
result = client.recognize_pii_entities(
["I work at Microsoft and my phone number is 333-333-3333"],
domain_filter=PiiEntityDomain.PROTECTED_HEALTH_INFORMATION
)
assert len(result[0].entities) == 2
microsoft = list(filter(lambda x: x.text == "Microsoft", result[0].entities))[0]
phone = list(filter(lambda x: x.text == "333-333-3333", result[0].entities))[0]
assert phone.category == "PhoneNumber"
assert microsoft.category == "Organization"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_categories_filter(self, client):
result = client.recognize_pii_entities(
["My name is Inigo Montoya, my SSN in 243-56-0987 and my phone number is 333-3333."],
)
# assert len(result[0].entities) == 3 FIXME service returning entity for "333-3333" and "333-3333."
result = client.recognize_pii_entities(
["My name is Inigo Montoya, my SSN in 243-56-0987 and my phone number is 333-3333."],
categories_filter=[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]
)
assert len(result[0].entities) == 1
entity = result[0].entities[0]
assert entity.category == PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER.value
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_categories_filter_with_domain_filter(self, client):
# Currently there seems to be no effective difference with or without the PHI domain filter.
result = client.recognize_pii_entities(
["My name is Inigo Montoya, my SSN in 243-56-0987 and my phone number is 333-3333."],
categories_filter=[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER],
domain_filter=PiiEntityDomain.PROTECTED_HEALTH_INFORMATION
)
assert len(result[0].entities) == 1
entity = result[0].entities[0]
assert entity.category == PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER.value
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy
def test_default_string_index_type_is_UnicodeCodePoint(self, client):
def callback(response):
assert response.http_request.query["stringIndexType"] == "UnicodeCodePoint"
res = client.recognize_pii_entities(
documents=["Hello world"],
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_04_01_PREVIEW})
@recorded_by_proxy
def test_default_string_index_type_UnicodeCodePoint_body_param(self, client):
def callback(response):
assert json.loads(response.http_request.body)['parameters']["stringIndexType"] == "UnicodeCodePoint"
res = client.recognize_pii_entities(
documents=["Hello world"],
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy
def test_explicit_set_string_index_type(self, client):
def callback(response):
assert response.http_request.query["stringIndexType"] == "TextElement_v8"
res = client.recognize_pii_entities(
documents=["Hello world"],
string_index_type="TextElement_v8",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_04_01_PREVIEW})
@recorded_by_proxy
def test_explicit_set_string_index_type_body_param(self, client):
def callback(response):
assert json.loads(response.http_request.body)['parameters']["stringIndexType"] == "TextElements_v8"
res = client.recognize_pii_entities(
documents=["Hello world"],
string_index_type="TextElement_v8",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy
def test_disable_service_logs(self, client):
def callback(resp):
assert resp.http_request.query['loggingOptOut']
client.recognize_pii_entities(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_04_01_PREVIEW})
@recorded_by_proxy
def test_disable_service_logs_body_param(self, client):
def callback(resp):
assert json.loads(resp.http_request.body)['parameters']['loggingOptOut']
client.recognize_pii_entities(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": "v3.0"})
def test_pii_entities_multiapi_validate_v3_0(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError) as e:
client.recognize_pii_entities(
documents=["Test"]
)
assert str(e.value) == "'recognize_pii_entities' is only available for API version v3.1 and up."
| 41.324832 | 152 | 0.64563 |
f7519752096720b558cc48482e85bc955853e1bc | 6,340 | py | Python | planet/splice.py | xaviershay/reader | 7c713c90ff546032e4723a232a32107ebae88278 | ["CNRI-Python-GPL-Compatible"] | 1 | 2016-05-09T10:18:18.000Z | 2016-05-09T10:18:18.000Z | planet/splice.py | xaviershay/reader | 7c713c90ff546032e4723a232a32107ebae88278 | ["CNRI-Python-GPL-Compatible"] | 4 | 2015-09-04T06:26:22.000Z | 2021-09-04T12:33:32.000Z | planet/splice.py | xaviershay/reader | 7c713c90ff546032e4723a232a32107ebae88278 | ["CNRI-Python-GPL-Compatible"] | 3 | 2015-09-04T05:44:12.000Z | 2018-05-11T13:06:03.000Z |
""" Splice together a planet from a cache of feed entries """
import glob, os, time, shutil
from xml.dom import minidom
import planet, config, feedparser, reconstitute, shell
from reconstitute import createTextElement, date
from spider import filename
from planet import idindex
def splice():
""" Splice together a planet from a cache of entries """
import planet
log = planet.logger
log.info("Loading cached data")
cache = config.cache_directory()
dir=[(os.stat(file).st_mtime,file) for file in glob.glob(cache+"/*")
if not os.path.isdir(file)]
dir.sort()
dir.reverse()
max_items=max([config.items_per_page(templ)
for templ in config.template_files() or ['Planet']])
doc = minidom.parseString('<feed xmlns="http://www.w3.org/2005/Atom"/>')
feed = doc.documentElement
# insert feed information
createTextElement(feed, 'title', config.name())
date(feed, 'updated', time.gmtime())
gen = createTextElement(feed, 'generator', config.generator())
gen.setAttribute('uri', config.generator_uri())
author = doc.createElement('author')
createTextElement(author, 'name', config.owner_name())
createTextElement(author, 'email', config.owner_email())
feed.appendChild(author)
if config.feed():
createTextElement(feed, 'id', config.feed())
link = doc.createElement('link')
link.setAttribute('rel', 'self')
link.setAttribute('href', config.feed())
if config.feedtype():
link.setAttribute('type', "application/%s+xml" % config.feedtype())
feed.appendChild(link)
if config.link():
link = doc.createElement('link')
link.setAttribute('rel', 'alternate')
link.setAttribute('href', config.link())
feed.appendChild(link)
# insert subscription information
sub_ids = []
feed.setAttribute('xmlns:planet',planet.xmlns)
sources = config.cache_sources_directory()
for sub in config.subscriptions():
data=feedparser.parse(filename(sources,sub))
if data.feed.has_key('id'): sub_ids.append(data.feed.id)
if not data.feed: continue
xdoc=minidom.parseString('''<planet:source xmlns:planet="%s"
xmlns="http://www.w3.org/2005/Atom"/>\n''' % planet.xmlns)
reconstitute.source(xdoc.documentElement, data.feed, None, None)
feed.appendChild(xdoc.documentElement)
index = idindex.open()
# insert entry information
items = 0
count = {}
new_feed_items = config.new_feed_items()
for mtime,file in dir:
if index != None:
base = os.path.basename(file)
if index.has_key(base) and index[base] not in sub_ids: continue
try:
entry=minidom.parse(file)
# verify that this entry is currently subscribed to and that the
# number of entries contributed by this feed does not exceed
# config.new_feed_items
entry.normalize()
sources = entry.getElementsByTagName('source')
if sources:
ids = sources[0].getElementsByTagName('id')
if ids:
id = ids[0].childNodes[0].nodeValue
count[id] = count.get(id,0) + 1
if new_feed_items and count[id] > new_feed_items: continue
if id not in sub_ids:
ids = sources[0].getElementsByTagName('planet:id')
if not ids: continue
id = ids[0].childNodes[0].nodeValue
if id not in sub_ids: continue
# add entry to feed
feed.appendChild(entry.documentElement)
items = items + 1
if items >= max_items: break
except:
log.error("Error parsing %s", file)
if index: index.close()
return doc
def apply(doc):
output_dir = config.output_dir()
if not os.path.exists(output_dir): os.makedirs(output_dir)
log = planet.logger
planet_filters = config.filters('Planet')
# Go-go-gadget-template
for template_file in config.template_files():
output_file = shell.run(template_file, doc)
# run any template specific filters
if config.filters(template_file) != planet_filters:
output = open(output_file).read()
for filter in config.filters(template_file):
if filter in planet_filters: continue
if filter.find('>')>0:
# tee'd output
filter,dest = filter.split('>',1)
tee = shell.run(filter.strip(), output, mode="filter")
if tee:
output_dir = planet.config.output_dir()
dest_file = os.path.join(output_dir, dest.strip())
dest_file = open(dest_file,'w')
dest_file.write(tee)
dest_file.close()
else:
# pipe'd output
output = shell.run(filter, output, mode="filter")
if not output:
os.unlink(output_file)
break
else:
handle = open(output_file,'w')
handle.write(output)
handle.close()
# Process bill of materials
for copy_file in config.bill_of_materials():
dest = os.path.join(output_dir, copy_file)
for template_dir in config.template_directories():
source = os.path.join(template_dir, copy_file)
if os.path.exists(source): break
else:
log.error('Unable to locate %s', copy_file)
log.info("Template search path:")
for template_dir in config.template_directories():
log.info(" %s", os.path.realpath(template_dir))
continue
mtime = os.stat(source).st_mtime
if not os.path.exists(dest) or os.stat(dest).st_mtime < mtime:
dest_dir = os.path.split(dest)[0]
if not os.path.exists(dest_dir): os.makedirs(dest_dir)
log.info("Copying %s to %s", source, dest)
if os.path.exists(dest): os.chmod(dest, 0644)
shutil.copyfile(source, dest)
shutil.copystat(source, dest)
| 37.738095 | 79 | 0.586751 |
f751af146af895c2b7971239633f9ed60b77862a | 2,076 | py | Python | mtr/sync/forms.py | mtrgroup/django-mtr-import-export | b8e7a6fa1cbc58b9e2126526f418306a7490cb52 | [
"MIT"
] | null | null | null | mtr/sync/forms.py | mtrgroup/django-mtr-import-export | b8e7a6fa1cbc58b9e2126526f418306a7490cb52 | [
"MIT"
] | null | null | null | mtr/sync/forms.py | mtrgroup/django-mtr-import-export | b8e7a6fa1cbc58b9e2126526f418306a7490cb52 | [
"MIT"
] | null | null | null | from django import forms
from mtr.utils.forms import GlobalInitialFormMixin
from mtr.utils.helpers import model_choices
from .lib.manager import manager
from .models import Settings, Field
# TODO: refactor
class SettingsAdminForm(GlobalInitialFormMixin, forms.ModelForm):
class Meta:
exclude = tuple()
model = Settings
def __init__(self, *args, **kwargs):
super(SettingsAdminForm, self).__init__(*args, **kwargs)
self.fields['processor'] = forms.ChoiceField(
label=self.fields['processor'].label,
choices=manager.processor_choices(),
initial=self.fields['processor'].initial,
help_text=self.fields['processor'].help_text,
required=self.fields['processor'].required)
self.fields['dataset'] = forms.ChoiceField(
label=self.fields['dataset'].label,
choices=(('', '----'),) + tuple(manager.dataset_choices()),
initial=self.fields['dataset'].initial,
help_text=self.fields['dataset'].help_text,
required=self.fields['dataset'].required)
self.fields['data_action'] = forms.ChoiceField(
label=self.fields['data_action'].label,
choices=(('', '----'),) + tuple(manager.action_choices()),
initial=self.fields['data_action'].initial,
help_text=self.fields['data_action'].help_text,
required=self.fields['data_action'].required)
self.fields['model'] = forms.ChoiceField(
label=self.fields['model'].label,
choices=model_choices(),
            required=self.fields['model'].required)
class FieldInlineAdminForm(forms.ModelForm):
class Meta:
exclude = tuple()
model = Field
def __init__(self, *args, **kwargs):
super(FieldInlineAdminForm, self).__init__(*args, **kwargs)
self.fields['converters'] = forms.ChoiceField(
label=self.fields['converters'].label,
required=self.fields['converters'].required,
choices=manager.converter_choices())
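# Admin wiring sketch (added for clarity; this uses the standard Django admin API, and the
# admin class names below are illustrative, not taken from this package):
#
#     from django.contrib import admin
#
#     class FieldInline(admin.TabularInline):
#         model = Field
#         form = FieldInlineAdminForm
#
#     class SettingsAdmin(admin.ModelAdmin):
#         form = SettingsAdminForm
#         inlines = [FieldInline]
#
#     admin.site.register(Settings, SettingsAdmin)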
| 33.483871 | 71 | 0.631985 |
f751b95552a0b9a87022c05e2d6c1c8d426941f8 | 1,950 | py | Python | tests/test_redis.py | azhai/rdcache | 8f3ca558a2a9af11321916bfe548aaeb66ef75ae | ["MIT"] | null | null | null | tests/test_redis.py | azhai/rdcache | 8f3ca558a2a9af11321916bfe548aaeb66ef75ae | ["MIT"] | null | null | null | tests/test_redis.py | azhai/rdcache | 8f3ca558a2a9af11321916bfe548aaeb66ef75ae | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from datetime import date
from rdcache.ext import RedisCache, RedisPool
redis = RedisPool({
"default": {
"host": "127.0.0.1",
"port": 6379,
"password": "",
"db": 0,
},
})
backend = redis.get('default')
cache = RedisCache(backend, touch = True)
user_rows = {
'alice': {'username':'alice', 'gender':'F', 'birthday':date(1981,10,10)},
'bob': {'username':'bob', 'gender':'M', 'birthday':date(1988,9,9)},
'candy': {'username':'candy', 'gender':'F', 'birthday':date(1983,7,15)},
'david': {'username':'david', 'gender':'M', 'birthday':date(1992,1,3)},
'emily': {'username':'eric', 'gender':'M', 'birthday':date(1991,12,25)},
}
@cache('user:%s', type = 'hash', time = 60)
def read_user(username):
return user_rows.get(username)
@cache('birthes', type = 'zset', time = 120)
def read_birthes():
return [(u, user_rows[u]['birthday']) for u in user_rows.iterkeys()]
def get_user(username):
user = read_user(username)
key = 'user:%s' % username
if backend.type(key) == 'hash':
user2 = backend.hgetall(key)
else:
user2 = {}
return user, user2
def get_birthes():
birthes = dict(read_birthes())
birthes2 = dict(backend.zrange('birthes',
0, -1, withscores = True))
return birthes, birthes2
class RedisCacheTestCase(unittest.TestCase):
""" 单元测试 """
def test_none(self):
eric, eric2 = get_user('eric')
self.assertEqual(eric, {})
self.assertEqual(eric2, {})
def test_hash(self):
candy, candy2 = get_user('candy')
self.assertEqual(candy, candy2)
def test_zset(self):
birthes, birthes2 = get_birthes()
self.assertEqual(len(birthes), len(birthes2))
for k, v in birthes.iteritems():
self.assertEqual(v, birthes2.get(k))
if __name__ == '__main__':
    unittest.main()
 | 27.464789 | 81 | 0.586667 |
f751c1a7b8557d111ece974fe0337ad5501315ca | 34 | py | Python | test/__init__.py | JugalBoro/trip-based-routing- | 66fd2095d6c072d16fe8d5a7ef0912ee72b22c08 | ["WTFPL"] | 43 | 2016-10-10T18:31:35.000Z | 2022-03-14T06:25:28.000Z | test/__init__.py | JugalBoro/trip-based-routing- | 66fd2095d6c072d16fe8d5a7ef0912ee72b22c08 | ["WTFPL"] | null | null | null | test/__init__.py | JugalBoro/trip-based-routing- | 66fd2095d6c072d16fe8d5a7ef0912ee72b22c08 | ["WTFPL"] | 8 | 2017-09-27T10:55:27.000Z | 2020-08-09T04:14:00.000Z |
from . import _common
c = _common
| 11.333333 | 21 | 0.735294 |
f751f549655353ec5d37344c1567c2eb88340d0c | 28,933 | py | Python | src/transformersX/models/promptbert/modeling_promptbert.py | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 | ["Apache-2.0"] | 2 | 2021-08-06T05:43:55.000Z | 2022-03-17T22:31:21.000Z | src/transformersX/models/promptbert/modeling_promptbert.py | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 | ["Apache-2.0"] | null | null | null | src/transformersX/models/promptbert/modeling_promptbert.py | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PROMPTBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_promptbert import PromptBertConfig
from ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings
from ..bert.modeling_bert import BertEncoder as PromptBertEncoder
from ..bert.modeling_bert import BertPooler as PromptBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "PromptBertConfig"
_TOKENIZER_FOR_DOC = "PromptBertTokenizer"
PROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class PromptBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = PromptBertConfig
load_tf_weights = load_tf_weights_in_promptbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
PROMPTBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROMPTBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertModel(PromptBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = PromptBertEmbeddings(config)
self.encoder = PromptBertEncoder(config)
self.pooler = PromptBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForSequenceClassification(PromptBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = PromptBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output
for computing source-target similarity).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):
def __init__(self, config, cls_loss_wgt=None):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_loss_wgt = cls_loss_wgt
self.bert = PromptBertModel(config)
self.pooler = PromptBertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, 2, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=DualPassageEncoderModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None or len(input_ids.size()) < 3:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.pooler(outputs[0])
pooled_output = self.dropout(pooled_output)
if not return_dict:
return (pooled_output,) + outputs[2:]
return DualPassageEncoderModelOutput(
pooled_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, _, l = input_ids.size()
flatten_input_ids = input_ids.reshape(-1, l)
flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.pooler(flatten_outputs[0])
src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)
src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()
mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()
cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))
cl_labels = torch.arange(b).to(labels.device)
loss_fct = CrossEntropyLoss()
cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))
if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:
flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))
src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)
src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()
src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))
trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))
cls_loss = src_loss + trg_loss
cls_logits = src_logits + trg_logits
loss = cl_loss + cls_loss * self.cls_loss_wgt
logits = cls_logits
else:
loss = cl_loss
logits = cl_logits
if not return_dict:
return (loss, logits,)
return DualPassageEncoderModelOutput(
loss=loss,
logits=logits,
)
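# Input-contract sketch for the dual-passage encoder (added for clarity; the shapes follow the
# forward() above, but the snippet itself is an assumption-labelled example, not original code):
#
#     import torch
#
#     b, l = 4, 16
#     input_ids = torch.randint(0, 30522, (b, 2, l))    # one (source, target) passage pair per row
#     attention_mask = torch.ones_like(input_ids)
#     labels = torch.arange(b)                          # distinct label per pair, so other rows act as negatives
#     model = PromptBertForDualPassageEncoder(PromptBertConfig())
#     out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
#     # out.loss   -> in-batch contrastive cross-entropy over source/target pooled outputs
#     # out.logits -> (b, b) similarity matrix with same-label off-diagonal pairs masked out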
| 43.904401 | 213 | 0.666263 |
f75206c35d2b5070eaa00f5e8d63d5d6aeb16b70 | 2,370 | py | Python | alipay/aop/api/domain/AlipayPayAppCarPayModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayPayAppCarPayModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayPayAppCarPayModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayPayAppCarPayModel(object):
def __init__(self):
self._out_trade_no = None
self._qr_code = None
self._subject = None
self._total_amount = None
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def qr_code(self):
return self._qr_code
@qr_code.setter
def qr_code(self, value):
self._qr_code = value
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
self._subject = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
def to_alipay_dict(self):
params = dict()
if self.out_trade_no:
if hasattr(self.out_trade_no, 'to_alipay_dict'):
params['out_trade_no'] = self.out_trade_no.to_alipay_dict()
else:
params['out_trade_no'] = self.out_trade_no
if self.qr_code:
if hasattr(self.qr_code, 'to_alipay_dict'):
params['qr_code'] = self.qr_code.to_alipay_dict()
else:
params['qr_code'] = self.qr_code
if self.subject:
if hasattr(self.subject, 'to_alipay_dict'):
params['subject'] = self.subject.to_alipay_dict()
else:
params['subject'] = self.subject
if self.total_amount:
if hasattr(self.total_amount, 'to_alipay_dict'):
params['total_amount'] = self.total_amount.to_alipay_dict()
else:
params['total_amount'] = self.total_amount
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayPayAppCarPayModel()
if 'out_trade_no' in d:
o.out_trade_no = d['out_trade_no']
if 'qr_code' in d:
o.qr_code = d['qr_code']
if 'subject' in d:
o.subject = d['subject']
if 'total_amount' in d:
o.total_amount = d['total_amount']
return o
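# Round-trip sketch (illustrative values, added for clarity):
#
#     model = AlipayPayAppCarPayModel()
#     model.out_trade_no = "20180801000001"          # hypothetical order number
#     model.subject = "car parking fee"
#     model.total_amount = "0.01"
#     params = model.to_alipay_dict()                # plain dict for the API request
#     restored = AlipayPayAppCarPayModel.from_alipay_dict(params)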
| 27.55814 | 75 | 0.583966 |
f7522b58fe759ad02beb5ce374a125dcdde42486 | 2,841 | py | Python | sphinx/conf.py | francois-rozet/lampe | 50e53c767ee5d98502ec8520b3bca554f2169eb7 | ["MIT"] | 3 | 2022-03-20T19:23:27.000Z | 2022-03-25T06:55:28.000Z | sphinx/conf.py | francois-rozet/lampe | 50e53c767ee5d98502ec8520b3bca554f2169eb7 | ["MIT"] | null | null | null | sphinx/conf.py | francois-rozet/lampe | 50e53c767ee5d98502ec8520b3bca554f2169eb7 | ["MIT"] | null | null | null |
# Configuration file for the Sphinx documentation builder
import os
import sys
import inspect
import importlib
sys.path.insert(0, os.path.abspath('..'))
## Project
package = 'lampe'
project = 'LAMPE'
copyright = '2021-2022, François Rozet'
repository = 'https://github.com/francois-rozet/lampe'
## Extensions
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
]
autodoc_default_options = {
'members': True,
'member-order': 'bysource',
}
autodoc_inherit_docstrings = False
autodoc_typehints = 'description'
autodoc_typehints_description_target = 'documented'
autodoc_typehints_format = 'short'
intersphinx_mapping = {
'matplotlib': ('https://matplotlib.org/stable', None),
'numpy': ('https://numpy.org/doc/stable', None),
'python': ('https://docs.python.org/3', None),
'torch': ('https://pytorch.org/docs/stable', None),
}
def linkcode_resolve(domain: str, info: dict) -> str:
module = info.get('module', '')
fullname = info.get('fullname', '')
if not module or not fullname:
return None
objct = importlib.import_module(module)
for name in fullname.split('.'):
objct = getattr(objct, name)
try:
file = inspect.getsourcefile(objct)
file = file[file.rindex(package):]
lines, start = inspect.getsourcelines(objct)
end = start + len(lines) - 1
except Exception as e:
return None
else:
return f'{repository}/tree/docs/{file}#L{start}-L{end}'
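# Purely illustrative mapping (module and class names are hypothetical): for
# info = {'module': 'lampe', 'fullname': 'SomeClass'}, the hook returns a URL of the form
# '<repository>/tree/docs/lampe/<source file>#L<start>-L<end>', or None when the source cannot be located.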
napoleon_custom_sections = [
('Shapes', 'params_style'),
'Wikipedia',
]
## Settings
add_function_parentheses = False
default_role = 'literal'
exclude_patterns = ['templates']
html_copy_source = False
html_css_files = [
'custom.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css',
]
html_favicon = 'static/logo_dark.svg'
html_show_sourcelink = False
html_sourcelink_suffix = ''
html_static_path = ['static']
html_theme = 'furo'
html_theme_options = {
'footer_icons': [
{
'name': 'GitHub',
'url': repository,
'html': '<i class="fa-brands fa-github fa-lg"></i>',
'class': '',
},
],
'light_css_variables': {
'color-api-keyword': '#007020',
'color-api-name': '#0e84b5',
'color-api-pre-name': '#0e84b5',
},
'light_logo': 'logo.svg',
'dark_css_variables': {
'color-api-keyword': '#66d9ef',
'color-api-name': '#a6e22e',
'color-api-pre-name': '#a6e22e',
},
'dark_logo': 'logo_dark.svg',
'sidebar_hide_name': True,
}
html_title = project
pygments_style = 'sphinx'
pygments_dark_style = 'monokai'
rst_prolog = """
.. role:: py(code)
:class: highlight
:language: python
"""
templates_path = ['templates']
| 24.491379 | 80 | 0.636748 |
f7523d0a6e50bef72051d9b4e2c27753b138a21c | 11,353 | py | Python | openpype/hosts/nuke/plugins/load/load_mov.py | yosuperdope/OpenPype | 0c90df97ddb8cda291a4f66d35da58b3deb94a71 | [
"MIT"
] | 1 | 2020-09-21T14:55:33.000Z | 2020-09-21T14:55:33.000Z | openpype/hosts/nuke/plugins/load/load_mov.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | null | null | null | openpype/hosts/nuke/plugins/load/load_mov.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | null | null | null | import nuke
from avalon.vendor import qargparse
from avalon import api, io
from openpype.api import get_current_project_settings
from openpype.hosts.nuke.api.lib import (
get_imageio_input_colorspace
)
def add_review_presets_config():
returning = {
"families": list(),
"representations": list()
}
settings = get_current_project_settings()
review_profiles = (
settings["global"]
["publish"]
["ExtractReview"]
["profiles"]
)
outputs = {}
for profile in review_profiles:
outputs.update(profile.get("outputs", {}))
for output, properities in outputs.items():
returning["representations"].append(output)
returning["families"] += properities.get("families", [])
return returning
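# Shape of the returned mapping (actual values depend on each project's ExtractReview settings):
#   {'representations': [<names of the configured review outputs>],
#    'families': [<families aggregated from those outputs>]}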
class LoadMov(api.Loader):
"""Load mov file into Nuke"""
families = ["render", "source", "plate", "review"]
representations = ["mov", "review", "mp4"]
label = "Load mov"
order = -10
icon = "code-fork"
color = "orange"
first_frame = nuke.root()["first_frame"].value()
# options gui
defaults = {
"start_at_workfile": True
}
options = [
qargparse.Boolean(
"start_at_workfile",
help="Load at workfile start frame",
default=True
)
]
node_name_template = "{class_name}_{ext}"
def load(self, context, name, namespace, options):
from avalon.nuke import (
containerise,
viewer_update_and_undo_stop
)
start_at_workfile = options.get(
"start_at_workfile", self.defaults["start_at_workfile"])
version = context['version']
version_data = version.get("data", {})
repr_id = context["representation"]["_id"]
self.handle_start = version_data.get("handleStart", 0)
self.handle_end = version_data.get("handleEnd", 0)
orig_first = version_data.get("frameStart")
orig_last = version_data.get("frameEnd")
diff = orig_first - 1
first = orig_first - diff
last = orig_last - diff
colorspace = version_data.get("colorspace")
repr_cont = context["representation"]["context"]
self.log.debug(
"Representation id `{}` ".format(repr_id))
context["representation"]["_id"]
# create handles offset (only to last, because of mov)
last += self.handle_start + self.handle_end
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
file = self.fname
if not file:
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
name_data = {
"asset": repr_cont["asset"],
"subset": repr_cont["subset"],
"representation": context["representation"]["name"],
"ext": repr_cont["representation"],
"id": context["representation"]["_id"],
"class_name": self.__class__.__name__
}
read_name = self.node_name_template.format(**name_data)
read_node = nuke.createNode(
"Read",
"name {}".format(read_name)
)
# to avoid multiple undo steps for rest of process
# we will switch off undo-ing
with viewer_update_and_undo_stop():
read_node["file"].setValue(file)
read_node["origfirst"].setValue(first)
read_node["first"].setValue(first)
read_node["origlast"].setValue(last)
read_node["last"].setValue(last)
read_node['frame_mode'].setValue("start at")
if start_at_workfile:
# start at workfile start
read_node['frame'].setValue(str(self.first_frame))
else:
# start at version frame start
read_node['frame'].setValue(
str(orig_first - self.handle_start))
if colorspace:
read_node["colorspace"].setValue(str(colorspace))
preset_clrsp = get_imageio_input_colorspace(file)
if preset_clrsp is not None:
read_node["colorspace"].setValue(preset_clrsp)
# add additional metadata from the version to imprint Avalon knob
add_keys = [
"frameStart", "frameEnd", "handles", "source", "author",
"fps", "version", "handleStart", "handleEnd"
]
data_imprint = {}
for key in add_keys:
if key == 'version':
data_imprint.update({
key: context["version"]['name']
})
else:
data_imprint.update({
key: context["version"]['data'].get(key, str(None))
})
data_imprint.update({"objectName": read_name})
read_node["tile_color"].setValue(int("0x4ecd25ff", 16))
if version_data.get("retime", None):
speed = version_data.get("speed", 1)
time_warp_nodes = version_data.get("timewarps", [])
self.make_retimes(speed, time_warp_nodes)
return containerise(
read_node,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint
)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
from avalon.nuke import (
update_container
)
read_node = nuke.toNode(container['objectName'])
assert read_node.Class() == "Read", "Must be Read"
file = self.fname
if not file:
repr_id = representation["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
# Get start frame from version data
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
version_data = version.get("data", {})
orig_first = version_data.get("frameStart")
orig_last = version_data.get("frameEnd")
diff = orig_first - 1
# set first to 1
first = orig_first - diff
last = orig_last - diff
self.handle_start = version_data.get("handleStart", 0)
self.handle_end = version_data.get("handleEnd", 0)
colorspace = version_data.get("colorspace")
if first is None:
self.log.warning((
"Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})").format(
read_node['name'].value(), representation))
first = 0
# create handles offset (only to last, because of mov)
last += self.handle_start + self.handle_end
read_node["file"].setValue(file)
# Set the global in to the start frame of the sequence
read_node["origfirst"].setValue(first)
read_node["first"].setValue(first)
read_node["origlast"].setValue(last)
read_node["last"].setValue(last)
read_node['frame_mode'].setValue("start at")
if int(float(self.first_frame)) == int(
float(read_node['frame'].value())):
# start at workfile start
read_node['frame'].setValue(str(self.first_frame))
else:
# start at version frame start
read_node['frame'].setValue(str(orig_first - self.handle_start))
if colorspace:
read_node["colorspace"].setValue(str(colorspace))
preset_clrsp = get_imageio_input_colorspace(file)
if preset_clrsp is not None:
read_node["colorspace"].setValue(preset_clrsp)
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"frameStart": str(first),
"frameEnd": str(last),
"version": str(version.get("name")),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
"handleStart": str(self.handle_start),
"handleEnd": str(self.handle_end),
"fps": str(version_data.get("fps")),
"author": version_data.get("author"),
"outputDir": version_data.get("outputDir")
})
# change color of node
if version.get("name") not in [max_version]:
read_node["tile_color"].setValue(int("0xd84f20ff", 16))
else:
read_node["tile_color"].setValue(int("0x4ecd25ff", 16))
if version_data.get("retime", None):
speed = version_data.get("speed", 1)
time_warp_nodes = version_data.get("timewarps", [])
self.make_retimes(speed, time_warp_nodes)
# Update the imprinted representation
update_container(
read_node, updated_dict
)
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
read_node = nuke.toNode(container['objectName'])
assert read_node.Class() == "Read", "Must be Read"
with viewer_update_and_undo_stop():
nuke.delete(read_node)
def make_retimes(self, speed, time_warp_nodes):
''' Create all retime and timewarping nodes with coppied animation '''
if speed != 1:
rtn = nuke.createNode(
"Retime",
"speed {}".format(speed))
rtn["before"].setValue("continue")
rtn["after"].setValue("continue")
rtn["input.first_lock"].setValue(True)
rtn["input.first"].setValue(
self.first_frame
)
if time_warp_nodes != []:
start_anim = self.first_frame + (self.handle_start / speed)
for timewarp in time_warp_nodes:
twn = nuke.createNode(timewarp["Class"],
"name {}".format(timewarp["name"]))
if isinstance(timewarp["lookup"], list):
# if array for animation
twn["lookup"].setAnimated()
for i, value in enumerate(timewarp["lookup"]):
twn["lookup"].setValueAt(
(start_anim + i) + value,
(start_anim + i))
else:
# if static value `int`
twn["lookup"].setValue(timewarp["lookup"])
| 32.623563 | 78 | 0.554039 |
f7529c1cda7df6a10d75d62cad3b470910c470bf | 5,849 | py | Python | tractseg/models/UNet_Pytorch_DeepSup.py | magreiner/TractSeg | 5ac5278fc3a6d3262f9f06924dbdde01b399ccf6 | [
"Apache-2.0"
] | null | null | null | tractseg/models/UNet_Pytorch_DeepSup.py | magreiner/TractSeg | 5ac5278fc3a6d3262f9f06924dbdde01b399ccf6 | [
"Apache-2.0"
] | null | null | null | tractseg/models/UNet_Pytorch_DeepSup.py | magreiner/TractSeg | 5ac5278fc3a6d3262f9f06924dbdde01b399ccf6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adamax
from torch.optim import Adam
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
from tractseg.libs.PytorchUtils import PytorchUtils
from tractseg.libs.ExpUtils import ExpUtils
from tractseg.models.BaseModel import BaseModel
from tractseg.libs.PytorchUtils import conv2d
from tractseg.libs.PytorchUtils import deconv2d
class UNet_Pytorch_DeepSup(torch.nn.Module):
def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):
super(UNet_Pytorch_DeepSup, self).__init__()
self.dropout = dropout
self.in_channel = n_input_channels
self.n_classes = n_classes
self.contr_1_1 = conv2d(n_input_channels, n_filt)
self.contr_1_2 = conv2d(n_filt, n_filt)
self.pool_1 = nn.MaxPool2d((2, 2))
self.contr_2_1 = conv2d(n_filt, n_filt * 2)
self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)
self.pool_2 = nn.MaxPool2d((2, 2))
self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)
self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)
self.pool_3 = nn.MaxPool2d((2, 2))
self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)
self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)
self.pool_4 = nn.MaxPool2d((2, 2))
self.dropout = nn.Dropout(p=0.4)
self.encode_1 = conv2d(n_filt * 8, n_filt * 16)
self.encode_2 = conv2d(n_filt * 16, n_filt * 16)
self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)
# self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d
self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)
self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)
self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)
# self.deconv_2 = nn.Upsample(scale_factor=2)
self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)
self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)
self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)
# self.deconv_3 = nn.Upsample(scale_factor=2)
self.output_2 = nn.Conv2d(n_filt * 4 + n_filt * 8, n_classes, kernel_size=1, stride=1, padding=0, bias=True)
self.output_2_up = nn.Upsample(scale_factor=2, mode='bilinear') # does only upscale width and height
self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)
self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)
self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)
# self.deconv_4 = nn.Upsample(scale_factor=2)
self.output_3 = nn.Conv2d(n_filt * 2 + n_filt * 4, n_classes, kernel_size=1, stride=1, padding=0, bias=True)
self.output_3_up = nn.Upsample(scale_factor=2, mode='bilinear') # does only upscale width and height
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)
self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)
self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True) # no activation function, because is in LossFunction (...WithLogits)
def forward(self, inpt):
contr_1_1 = self.contr_1_1(inpt)
contr_1_2 = self.contr_1_2(contr_1_1)
pool_1 = self.pool_1(contr_1_2)
contr_2_1 = self.contr_2_1(pool_1)
contr_2_2 = self.contr_2_2(contr_2_1)
pool_2 = self.pool_2(contr_2_2)
contr_3_1 = self.contr_3_1(pool_2)
contr_3_2 = self.contr_3_2(contr_3_1)
pool_3 = self.pool_3(contr_3_2)
contr_4_1 = self.contr_4_1(pool_3)
contr_4_2 = self.contr_4_2(contr_4_1)
pool_4 = self.pool_4(contr_4_2)
# pool_4 = self.dropout(pool_4)
encode_1 = self.encode_1(pool_4)
encode_2 = self.encode_2(encode_1)
deconv_1 = self.deconv_1(encode_2)
concat1 = torch.cat([deconv_1, contr_4_2], 1)
expand_1_1 = self.expand_1_1(concat1)
expand_1_2 = self.expand_1_2(expand_1_1)
deconv_2 = self.deconv_2(expand_1_2)
concat2 = torch.cat([deconv_2, contr_3_2], 1)
expand_2_1 = self.expand_2_1(concat2)
expand_2_2 = self.expand_2_2(expand_2_1)
deconv_3 = self.deconv_3(expand_2_2)
output_2 = self.output_2(concat2)
output_2_up = self.output_2_up(output_2)
concat3 = torch.cat([deconv_3, contr_2_2], 1)
expand_3_1 = self.expand_3_1(concat3)
expand_3_2 = self.expand_3_2(expand_3_1)
deconv_4 = self.deconv_4(expand_3_2)
output_3 = output_2_up + self.output_3(concat3)
output_3_up = self.output_3_up(output_3)
concat4 = torch.cat([deconv_4, contr_1_2], 1)
expand_4_1 = self.expand_4_1(concat4)
expand_4_2 = self.expand_4_2(expand_4_1)
conv_5 = self.conv_5(expand_4_2)
final = output_3_up + conv_5
# return conv_51
# return final
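        # Minimal shape sanity check (illustrative values, not part of the original training code):
        #   net = UNet_Pytorch_DeepSup(n_input_channels=9, n_classes=72, n_filt=64)
        #   x = torch.zeros(1, 9, 144, 144)   # H and W must be divisible by 16 (four 2x2 poolings)
        #   logits, probs = net(x)            # both shaped (1, 72, 144, 144); probs = sigmoid(logits)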
return final, F.sigmoid(final) | 40.061644 | 167 | 0.675158 |
f752b59568a74f56c9b581651e54d1cab2af227f | 341 | bzl | Python | tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 850 | 2018-01-18T05:56:02.000Z | 2022-03-31T08:17:34.000Z | tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 364 | 2018-01-22T02:11:16.000Z | 2022-03-27T12:58:47.000Z | """Fuzzing template for TensorFlow ops."""
def tf_ops_fuzz_target_lib(name):
native.cc_library(
name = name + "_fuzz_lib",
srcs = [name + "_fuzz.cc"],
deps = [
"//tensorflow/core/kernels/fuzzing:fuzz_session",
"//tensorflow/cc:cc_ops",
],
tags = ["no_windows"],
alwayslink = 1,
)
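# Example wiring (hypothetical op name) from a BUILD file in the fuzzing package:
#   tf_ops_fuzz_target_lib("identity")
# would declare a cc_library named "identity_fuzz_lib" built from identity_fuzz.cc.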
| 24.357143 | 59 | 0.571848 |
f752b8c8b4ee879789503b541ffbf114963d79ab | 42 | py | Python | src/server/blueprints/exposable/__init__.py | sudo-at-night/palmox | 90651be4349ae09c3c1fc77b42eb75901fb10d81 | [
"MIT"
] | 1 | 2021-05-18T07:32:37.000Z | 2021-05-18T07:32:37.000Z | src/server/blueprints/exposable/__init__.py | sudo-at-night/palmox | 90651be4349ae09c3c1fc77b42eb75901fb10d81 | [
"MIT"
] | null | null | null | src/server/blueprints/exposable/__init__.py | sudo-at-night/palmox | 90651be4349ae09c3c1fc77b42eb75901fb10d81 | [
"MIT"
] | null | null | null | from .blueprint import exposable_blueprint | 42 | 42 | 0.904762 |
f752bdd1f32ccef9d81aa782593770a301b84a0f | 672 | py | Python | setup.py | xyproto/fnu | 47e41824ab7ce7746637036d60cf37ac9f381a61 | [
"MIT"
] | 3 | 2020-03-11T21:18:55.000Z | 2020-10-05T12:42:30.000Z | setup.py | xyproto/minitree | 47e41824ab7ce7746637036d60cf37ac9f381a61 | [
"MIT"
] | null | null | null | setup.py | xyproto/minitree | 47e41824ab7ce7746637036d60cf37ac9f381a61 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
setup(name="minitree",
version="0.4.1",
description="List files in columns",
url="https://github.com/xyproto/minitree",
author="Alexander F. Rødseth",
author_email="xyproto@archlinux.org",
license="MIT",
py_modules=["mt"],
entry_points={
"console_scripts" : [
"mt = mt:main",
]
},
classifiers=[
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: System :: Shells",
"Topic :: Utilities",
]
)
| 24 | 51 | 0.541667 |
f752e3223f28a9a09268748c9d23b25198db40b7 | 3,263 | py | Python | prepare_data.py | violetcodes/hiking-ridge | b10550f32172a0e36371e1ad84563be32e67ab4a | [
"Apache-2.0"
] | null | null | null | prepare_data.py | violetcodes/hiking-ridge | b10550f32172a0e36371e1ad84563be32e67ab4a | [
"Apache-2.0"
] | null | null | null | prepare_data.py | violetcodes/hiking-ridge | b10550f32172a0e36371e1ad84563be32e67ab4a | [
"Apache-2.0"
] | null | null | null | import utils
from tqdm import tqdm
jfile_texts = lambda jload: [i['section_title'] + ': ' + i['text'] for i in jload]
def find_index(text, target, width=100, find_only=False, lowr=True):
if lowr:
text = utils.clean_text(text)
target = utils.clean_text(target)
if find_only:
return target in text
if target in text:
start = text.index(target)
end = start + len(target)
return start, end
else:
print('target not found in the text, searching keywords')
def keep_longest_labels(labels):
keep = set()
lab = {i for i in labels}
while lab:
e = lab.pop()
f = {i for i in lab if e in i or i in e}
f.add(e)
l = max(f, key=lambda x: len(x))
keep.add(l)
lab.difference_update(f)
return list(keep)
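# Illustrative behaviour: keep_longest_labels(['adni', 'adni dataset', 'mnist']) drops 'adni'
# because it is contained in 'adni dataset', leaving ['adni dataset', 'mnist']
# (order not guaranteed, since a set is used internally).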
def get_indx(doc, labels):
labels = keep_longest_labels(labels)
return sorted(
[find_index(doc, label) for label in labels
if find_index(doc, label, find_only=True)],
key=lambda x: x[0])
def split_and_tag(doc, indx):
# indx = [tuple(i) for i in indx]
if indx == []:
return doc.strip().split(), ['O']*len(doc.split())
tokens = []
tags = []
split_points = [0, ] + list({j for i in indx for j in i}) + [len(doc),]
split_points.sort()
for s, e in zip(split_points, split_points[1:]):
sp = doc[s:e].strip().split()
# print(s, e, sp)
if (s, e) in indx:
tags.extend(['B',] + ['I'] * (len(sp) - 1))
        else:
            tags.extend(['O'] * len(sp))
tokens.extend(sp)
return tokens, tags
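# Worked example (hypothetical strings) of the BIO-style tagging produced above:
#   doc  = "we use the adni dataset here"
#   indx = [(11, 23)]          # character span covering "adni dataset"
#   split_and_tag(doc, indx)
#   -> (['we', 'use', 'the', 'adni', 'dataset', 'here'],
#       ['O',  'O',   'O',   'B',    'I',       'O'])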
class DataPrep:
def __init__(self, id_label_map=None):
        id_label_map = id_label_map or {}
        self.labels_map = id_label_map
self.docs_json = {_id: utils.get_json(_id)
for _id in tqdm(self.labels_map, 'reading files')}
self.docs_text = {_id: jfile_texts(jdoc)
for _id, jdoc in self.docs_json.items()}
self.meta = {f'{_id}_{i}': dict(text=para, doc_id=_id,
para_no=i, id=f'{_id}_{i}',
clean_text=utils.clean_text(para))
for _id, paras in self.docs_text.items()
for i, para in enumerate(paras)}
def prep(self, to_process=None, labels_map=None):
'''to_process -> list[dict,]: each dict must have clean_text and
doc_id
labels_map: -> dict: doc_id to labels mapping
'''
result = []
to_process = to_process or list(self.meta.values())
labels_map = labels_map or self.labels_map
for doc in tqdm(to_process, 'processing...'):
text = doc['clean_text']
doc_id = doc['doc_id']
labels = labels_map[doc_id]
indx = get_indx(text, labels)
if indx != []:
tokens, tags = split_and_tag(text, indx)
result.append(dict(
tokens=tokens,
tags=tags,
id=doc['id']
))
return result
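# Rough end-to-end sketch (ids and labels are illustrative only):
#   labels = {"0123456789": ["ADNI", "adni dataset"]}   # publication id -> dataset mentions
#   dp = DataPrep(id_label_map=labels)                   # reads each publication via utils.get_json
#   tagged = dp.prep()                                   # list of {'tokens': ..., 'tags': ..., 'id': ...}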
| 32.306931 | 82 | 0.517315 |
f752e9468e334d361da0a8c8c316b96fa818e09f | 688 | py | Python | azure-devops/azext_devops/vstsCompressed/file_container/v4_1/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/file_container/v4_1/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/file_container/v4_1/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import FileContainer
from .models import FileContainerItem
__all__ = [
'FileContainer',
'FileContainerItem',
]
| 43 | 94 | 0.452035 |
f75316db187264acd547e92b9a375c0bf8205fd7 | 2,745 | py | Python | angalabiri/suggestion/views.py | dark-codr/ebiangala | 0af3de29b2afa71df3e138cd16ecddc69fbd597d | [
"MIT"
] | 1 | 2021-03-25T14:06:23.000Z | 2021-03-25T14:06:23.000Z | angalabiri/suggestion/views.py | dark-codr/ebiangala | 0af3de29b2afa71df3e138cd16ecddc69fbd597d | [
"MIT"
] | 5 | 2021-09-08T03:08:46.000Z | 2022-03-12T00:56:35.000Z | angalabiri/suggestion/views.py | me-edavids/ebiangala | 0af3de29b2afa71df3e138cd16ecddc69fbd597d | [
"MIT"
] | null | null | null | from django.db.models import Sum, F
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import View
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, DeleteView, FormMixin, UpdateView
from django.views.generic.edit import FormMixin
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.utils.http import is_safe_url
from django.utils.safestring import mark_safe
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.core.mail import send_mail
from django.utils import timezone
from django.http import JsonResponse
import datetime
from sweetify.views import SweetifySuccessMixin
import sweetify
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from angalabiri.suggestion.models import Suggestion
from angalabiri.suggestion.forms import SuggestionForm
User = get_user_model()
# Create your views here.
class SuggestionList(ListView):
model = Suggestion
template_name = "pages/suggestion/list.html"
ordering = ["title", "-pub_date"]
queryset = Suggestion.objects.all_suggestion()
context_object_name = "objs"
allow_empty = True
paginate_by = 20
slug_field = "slug"
slug_url_kwarg = "slug"
class SuggestionDetail(DetailView):
model = Suggestion
template_name = "pages/suggestion/detail.html"
ordering = ["title", "pub_date"]
allow_empty = True
queryset = Suggestion.objects.all_suggestion()
context_object_name = 'obj'
slug_field = "slug"
slug_url_kwarg = "slug"
class SuggestionCreate(CreateView):
model = Suggestion
template_name = "pages/suggestion/create.html"
form_class = SuggestionForm
def get_success_url(self):
return reverse("suggestion:detail", kwargs={"slug": self.object.slug})
def form_valid(self, form):
suggestion = form.save(commit=False)
msg = """
name: {name}\n
email: {email}\n
text: {content}\n
\n
        suggestion for angalabiri
        """.format(name=suggestion.title, email=suggestion.email, content=suggestion.content)
send_mail(
"NEW SUGGESSTION HAS BEEN MADE",
msg,
"noreply@ebiangala.ng",
settings.ADMINS,
fail_silently=False
)
return super().form_valid(form)
| 34.3125 | 91 | 0.738434 |
f753235d42eb26c88d363fa1ccf74fbd3ae519b9 | 501 | py | Python | nlp_annotator_api/server/signals/process_pool.py | dasaku-ai/nlpmodel-v-h | 326bb9467df11d517285610c70f9d22627eb5efc | [
"Apache-2.0"
] | 3 | 2022-01-04T12:15:22.000Z | 2022-03-25T21:19:20.000Z | nlp_annotator_api/server/signals/process_pool.py | IBM/deepsearch-nlp-annotator-api-example | 76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40 | [
"Apache-2.0"
] | 1 | 2022-02-02T09:26:44.000Z | 2022-02-02T09:26:44.000Z | nlp_annotator_api/server/signals/process_pool.py | dasaku-ai/nlpmodel-v-h | 326bb9467df11d517285610c70f9d22627eb5efc | [
"Apache-2.0"
] | 5 | 2021-09-27T08:26:09.000Z | 2022-03-10T11:41:35.000Z | import logging
from concurrent.futures.process import ProcessPoolExecutor
logger = logging.getLogger(__name__)
def process_pool_factory(num_workers: int):
async def process_pool(app_instance):
logger.debug("Setting up process pool with %r workers", num_workers)
pool = ProcessPoolExecutor(max_workers=num_workers)
app_instance['process_pool'] = pool
yield
logger.debug("Shutting down process pool")
pool.shutdown()
return process_pool
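# Intended wiring (sketch, assuming an aiohttp web.Application instance named `app`):
#   app.cleanup_ctx.append(process_pool_factory(num_workers=2))
# Request handlers can then offload CPU-bound work to request.app['process_pool'].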
| 21.782609 | 76 | 0.720559 |
f753245c6149b5b259cd11b8a8f9fa6891a02d8c | 6,261 | py | Python | ucb_cs61A/projects/scheme/scheme_reader.py | tavaresdong/courses-notes | 7fb89103bca679f5ef9b14cbc777152daac1402e | [
"MIT"
] | null | null | null | ucb_cs61A/projects/scheme/scheme_reader.py | tavaresdong/courses-notes | 7fb89103bca679f5ef9b14cbc777152daac1402e | [
"MIT"
] | 1 | 2017-07-31T08:15:26.000Z | 2017-07-31T08:15:26.000Z | ucb_cs61A/projects/scheme/scheme_reader.py | tavaresdong/courses-notes | 7fb89103bca679f5ef9b14cbc777152daac1402e | [
"MIT"
] | 1 | 2019-10-06T16:52:31.000Z | 2019-10-06T16:52:31.000Z | """This module implements the built-in data types of the Scheme language, along
with a parser for Scheme expressions.
In addition to the types defined in this file, some data types in Scheme are
represented by their corresponding type in Python:
number: int or float
symbol: string
boolean: bool
unspecified: None
The __repr__ method of a Scheme value will return a Python expression that
would be evaluated to the value, where possible.
The __str__ method of a Scheme value will return a Scheme expression that
would be read to the value, where possible.
"""
from ucb import main, trace, interact
from scheme_tokens import tokenize_lines, DELIMITERS
from buffer import Buffer, InputReader, LineReader
# Pairs and Scheme lists
class Pair:
"""A pair has two instance attributes: first and second. For a Pair to be
a well-formed list, second is either a well-formed list or nil. Some
methods only apply to well-formed lists.
>>> s = Pair(1, Pair(2, nil))
>>> s
Pair(1, Pair(2, nil))
>>> print(s)
(1 2)
>>> print(s.map(lambda x: x+4))
(5 6)
"""
def __init__(self, first, second):
self.first = first
self.second = second
def __iter__(self):
self.cur = self
return self
def __next__(self):
if self.cur == nil:
raise StopIteration
else:
val = self.cur.first
self.cur = self.cur.second
return val
def __repr__(self):
return "Pair({0}, {1})".format(repr(self.first), repr(self.second))
def __str__(self):
s = "(" + str(self.first)
second = self.second
while isinstance(second, Pair):
s += " " + str(second.first)
second = second.second
if second is not nil:
s += " . " + str(second)
return s + ")"
def __len__(self):
n, second = 1, self.second
while isinstance(second, Pair):
n += 1
second = second.second
if second is not nil:
raise TypeError("length attempted on improper list")
return n
def __eq__(self, p):
if not isinstance(p, Pair):
return False
return self.first == p.first and self.second == p.second
def map(self, fn):
"""Return a Scheme list after mapping Python function FN to SELF."""
mapped = fn(self.first)
if self.second is nil or isinstance(self.second, Pair):
return Pair(mapped, self.second.map(fn))
else:
raise TypeError("ill-formed list")
class nil:
"""The empty list"""
def __repr__(self):
return "nil"
def __str__(self):
return "()"
def __len__(self):
return 0
def map(self, fn):
return self
nil = nil() # Assignment hides the nil class; there is only one instance
# Scheme list parser
def scheme_read(src):
"""Read the next expression from SRC, a Buffer of tokens.
>>> lines = ["(+ 1 ", "(+ 23 4)) ("]
>>> src = Buffer(tokenize_lines(lines))
>>> print(scheme_read(src))
(+ 1 (+ 23 4))
>>> read_line("'hello")
Pair('quote', Pair('hello', nil))
>>> print(read_line("(car '(1 2))"))
(car (quote (1 2)))
"""
if src.current() is None:
raise EOFError
val = src.pop()
if val == "nil":
return nil
elif val not in DELIMITERS:
return val
elif val == "'":
# BEGIN Question 1
return Pair('quote', Pair(scheme_read(src), nil))
# END Question 1
elif val == "(":
return read_tail(src)
else:
raise SyntaxError("unexpected token: {0}".format(val))
def read_tail(src):
"""Return the remainder of a list in SRC, starting before an element or ).
>>> read_tail(Buffer(tokenize_lines([")"])))
nil
>>> read_tail(Buffer(tokenize_lines(["2 3)"])))
Pair(2, Pair(3, nil))
>>> read_tail(Buffer(tokenize_lines(["2 (3 4))"])))
Pair(2, Pair(Pair(3, Pair(4, nil)), nil))
>>> read_line("(1 . 2)")
Pair(1, 2)
>>> read_line("(1 2 . 3)")
Pair(1, Pair(2, 3))
>>> read_line("(1 . 2 3)")
Traceback (most recent call last):
...
SyntaxError: expected one element after .
>>> scheme_read(Buffer(tokenize_lines(["(1", "2 .", "'(3 4))", "4"])))
Pair(1, Pair(2, Pair('quote', Pair(Pair(3, Pair(4, nil)), nil))))
"""
try:
if src.current() is None:
raise SyntaxError("unexpected end of file")
elif src.current() == ")":
src.pop()
return nil
elif src.current() == ".":
# BEGIN Question 2
src.pop()
after = scheme_read(src)
if src.current() != ')':
raise SyntaxError("expected one element after .")
else:
src.pop()
return after
# END Question 2
else:
first = scheme_read(src)
rest = read_tail(src)
return Pair(first, rest)
except EOFError:
raise SyntaxError("unexpected end of file")
# Convenience methods
def buffer_input(prompt="scm> "):
"""Return a Buffer instance containing interactive input."""
return Buffer(tokenize_lines(InputReader(prompt)))
def buffer_lines(lines, prompt="scm> ", show_prompt=False):
"""Return a Buffer instance iterating through LINES."""
if show_prompt:
input_lines = lines
else:
input_lines = LineReader(lines, prompt)
return Buffer(tokenize_lines(input_lines))
def read_line(line):
"""Read a single string LINE as a Scheme expression."""
return scheme_read(Buffer(tokenize_lines([line])))
# Interactive loop
@main
def read_print_loop():
"""Run a read-print loop for Scheme expressions."""
while True:
try:
src = buffer_input("read> ")
while src.more_on_line:
expression = scheme_read(src)
print("str :", expression)
print("repr:", repr(expression))
except (SyntaxError, ValueError) as err:
print(type(err).__name__ + ":", err)
except (KeyboardInterrupt, EOFError): # <Control>-D, etc.
print()
return
| 29.257009 | 79 | 0.576585 |
f7532aa4aa3ade9a6ba2aa21ce694ce0cbe330ea | 4,540 | py | Python | stoq/installer.py | ytreister/stoq | 8bfc78b226ee6500eb78e1bdf361fc83bc5005b7 | [
"Apache-2.0"
] | 385 | 2015-11-20T02:21:18.000Z | 2022-03-24T09:38:24.000Z | stoq/installer.py | ytreister/stoq | 8bfc78b226ee6500eb78e1bdf361fc83bc5005b7 | [
"Apache-2.0"
] | 127 | 2016-07-08T20:23:20.000Z | 2022-02-23T13:52:19.000Z | stoq/installer.py | ytreister/stoq | 8bfc78b226ee6500eb78e1bdf361fc83bc5005b7 | [
"Apache-2.0"
] | 68 | 2015-11-20T12:51:44.000Z | 2022-01-25T04:35:54.000Z | #!/usr/bin/env python3
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
import requests
import subprocess
from tempfile import NamedTemporaryFile
from .exceptions import StoqException, StoqPluginException
log = logging.getLogger()
class StoqPluginInstaller:
DEFAULT_REPO = 'git+https://github.com/PUNCH-Cyber/stoq-plugins-public.git@v3'
@staticmethod
def install(
plugin_path: str, install_dir: str, upgrade: bool, github: bool
) -> None:
if github:
if plugin_path.startswith('git+http'):
pass
elif plugin_path.startswith('stoq:'):
plugin_name = plugin_path.split(':')[1]
plugin_path = f'{StoqPluginInstaller.DEFAULT_REPO}#egg={plugin_name}&subdirectory={plugin_name}'
else:
raise StoqException('Invalid Github repository specified.')
else:
plugin_path = os.path.abspath(plugin_path)
if not os.path.isdir(plugin_path):
raise StoqException(
f'Given plugin directory does not exist: {plugin_path}'
)
install_dir = os.path.abspath(install_dir)
if not os.path.isdir(install_dir):
raise StoqException(
f'Given install directory does not exist: {install_dir}'
)
StoqPluginInstaller.setup_package(plugin_path, install_dir, upgrade, github)
@staticmethod
def setup_package(
plugin_path: str, install_dir: str, upgrade: bool, github: bool
) -> None:
if github:
url = (
plugin_path.split('+')[1]
.split('#')[0]
.replace('.git', '')
.replace('github.com', 'raw.githubusercontent.com')
.replace('@', '/')
)
path = plugin_path.split('subdirectory=')[1]
requirements = f'{url}/{path}/requirements.txt'
with NamedTemporaryFile() as temp_file:
response = requests.get(requirements)
if response.status_code == 200:
temp_file.write(response.content)
temp_file.flush()
subprocess.check_call(
[
sys.executable,
'-m',
'pip',
'install',
'--quiet',
'-r',
temp_file.name,
]
)
elif response.status_code == 404:
pass
else:
log.info(f'Failed to install requirements from {requirements}')
else:
requirements = f'{plugin_path}/requirements.txt'
if os.path.isfile(requirements):
subprocess.check_call(
[
sys.executable,
'-m',
'pip',
'install',
'--quiet',
'-r',
requirements,
]
)
cmd = [sys.executable, '-m', 'pip', 'install', plugin_path, '-t', install_dir]
if upgrade:
cmd.append('--upgrade')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if f'WARNING: Target directory {install_dir}' in output.decode():
raise StoqPluginException(
f'Plugin ({plugin_path}) already exists in {install_dir}'
)
except subprocess.CalledProcessError as err:
if not os.getenv('VIRTUAL_ENV'):
log.error(
'[!!] Plugin install failed. Are you root or in a virtual environment?\n'
)
raise StoqException(err.output)
| 37.213115 | 112 | 0.529075 |
f75358fc6aabeb256f1896a60b41ccf503b178b8 | 9,521 | py | Python | thatkitebot/cogs/laser.py | blablabliam/thatkitebot | 741e59993f05eb437b46d4839db517af7b8f0357 | [
"MIT"
] | null | null | null | thatkitebot/cogs/laser.py | blablabliam/thatkitebot | 741e59993f05eb437b46d4839db517af7b8f0357 | [
"MIT"
] | null | null | null | thatkitebot/cogs/laser.py | blablabliam/thatkitebot | 741e59993f05eb437b46d4839db517af7b8f0357 | [
"MIT"
] | null | null | null | # Copyright (c) 2019-2022 ThatRedKite and contributors
from typing import Optional
from discord.ext import commands
from wand.image import Image as WandImage
from wand.color import Color as WandColor
import discord
import si_prefix
from math import sin, atan
from io import BytesIO
from thatkitebot.backend import util
from thatkitebot.cogs.electronics import parse_input, TooFewArgsError
def wavelength_to_rgb(wavelength, gamma=0.98):
'''This converts a given wavelength of light to an
approximate RGB color value. The wavelength must be given
in nanometers in the range from 380 nm through 750 nm
(789 THz through 400 THz).
Based on code by Dan Bruton
http://www.physics.sfasu.edu/astro/color/spectra.html
'''
wavelength = float(wavelength)
if 380 <= wavelength <= 440:
attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
G = 0.0
B = (1.0 * attenuation) ** gamma
if 405 <= wavelength < 430:
R = R * (wavelength / 310)
elif 440 <= wavelength <= 490:
R = 0.0
G = ((wavelength - 440) / (490 - 440)) ** gamma
B = 1.0
elif 490 <= wavelength <= 510:
R = 0.0
G = 1.0
B = (-(wavelength - 510) / (510 - 490)) ** gamma
elif 510 <= wavelength <= 580:
R = ((wavelength - 510) / (580 - 510)) ** gamma
G = 1.0
B = 0.0
elif 580 <= wavelength <= 645:
R = 1.0
G = ((-(wavelength - 645) / (645 - 580)) ** gamma) * 0.9
B = 0.0
elif 645 <= wavelength <= 750:
attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
R = (1 * attenuation) ** gamma
G = 0.0
B = 0.0
else:
R = 0.0
G = 0.0
B = 0.0
R *= 255
G *= 255
B *= 255
return int(R), int(G), int(B)
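# Quick sanity checks (approximate, with the default gamma):
#   wavelength_to_rgb(650) -> a red-dominated triple; green and blue are both 0
#   wavelength_to_rgb(200) -> (0, 0, 0), i.e. anything outside ~380-750 nm renders as "invisible"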
def calculate_diffraction(p):
if "lmm" in p:
lmm = si_prefix.si_parse(p["lmm"])
else:
raise TooFewArgsError()
if "l" in p:
l = si_prefix.si_parse(p["l"])
else:
raise TooFewArgsError()
if "d" in p:
d = si_prefix.si_parse(p["d"])
else:
raise TooFewArgsError()
res = 1 / lmm / 1000 * sin((atan(d / (2 * l))))
return dict(res=si_prefix.si_format(res), Lmm=lmm, L=si_prefix.si_format(l), D=si_prefix.si_format(d))
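# Worked example (illustrative numbers): a 1000 lines/mm grating, wall at L = 1 m and
# first-order dots D = 1.1 m apart:
#   calculate_diffraction({"lmm": "1000", "l": "1", "d": "1.1"})
# yields roughly 4.82e-7 m (~482 nm), returned as an SI-formatted string under 'res'.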
class LaserCog(commands.Cog, name="Laser commands"):
def __init__(self, bot):
self.bot: commands.Bot = bot
@commands.cooldown(5, 10, commands.BucketType.channel)
@commands.command(aliases=["autism"])
async def spectrum(self, ctx):
"""
Returns a picture of visible light spectrum.
"""
embed = discord.Embed(title="Visible Light Spectrum")
embed.set_image(
url="https://media.discordapp.net/attachments/910895468001767484/913348594269036584/unknown.png")
await ctx.send(embed=embed)
@commands.cooldown(1, 5, commands.BucketType.channel)
@commands.group()
async def laser(self, ctx):
"""
General command for laser related things.
"""
if not ctx.subcommand_passed:
await self.goggles(ctx)
@laser.command(aliases=["glasses", "safety"])
async def goggles(self, ctx, section: str = ""):
"""
Returns a laser safety information.
"""
brands = section.lower() in ["brands", "companies", "manufacturers"]
od = section.lower() in ["od", "density"]
amazon = section.lower() in ["amazon", "wish"]
wavelength = section.lower() in ["wavelength", "nm", "nanometers"]
if not brands and not od and not amazon and not wavelength:
brands = True
od = True
amazon = True
wavelength = True
embed = discord.Embed(title="Lasers of all powers can pose a serious risk to your eyes.",
description="""5mW is the safety limit where your blink reflex should save you from any damage.
            Anything above that can cause permanent eye damage faster than you can blink and, in the worst case, permanent blindness.""")
else:
embed = discord.Embed(title="Laser safety guide")
embed.set_thumbnail(
url="https://cdn.discordapp.com/attachments/909159696798220400/912036244073115658/14429.png")
if brands:
embed.add_field(name="\nLaser safety equipment can be found here: ",
value="[Laserglow](https://www.laserglow.com/product/AGF-Laser-Safety-Goggles)\n"
"[Lasertack](https://lasertack.com/en/laser-safety-glasses)\n"
"[Thorlabs](https://www.thorlabs.com/newgrouppage9.cfm?objectgroup_id=762)",
inline=False)
embed.add_field(name="\nOther trusted brands include",
value="Honeywell, Glendale, Sperian,"
"Newport/MKS, Edmund Optics, Laservision/Uvex,"
"Laserglow, NoIR (LaserShield)",
inline=False)
if amazon:
embed.add_field(name="\nAnything from Amazon, AliExpress, Wish is **__unsafe!__**",
value="If you wish to see for the rest of your life, **__do not use them!__**", inline=True)
if od:
embed.add_field(name="\nWhat is OD?",
value="""OD stands for *Optical density*.
            It’s the fraction of light (of a certain wavelength) that gets through the goggles, expressed in powers of 10.
OD1 means that *10%* of the light that hits the goggles gets through.
OD2 means *1%*,
OD3 is *0.1%*, and so on.""",
inline=False)
if wavelength:
embed.add_field(name="\nWhat is the wavelength or nm?",
value=f"""The wavelength in nanometers (nm) corresponds to the color.
            If you are not sure of the wavelength but you know the color,
you can ask someone, do `{self.bot.command_prefix}laser color (color)` or refer to `+spectrum`.""",
inline=True)
embed.set_footer(text=f"For a more in depth explanation, use {self.bot.command_prefix}laser safety")
await ctx.send(embed=embed)
@laser.command()
async def color(self, ctx, color: str):
"""
Returns an approximation of light color given a wavelength.
"""
color = int(color.lower().replace("nm", ""))
new_color = wavelength_to_rgb(color)
with WandImage(width=256, height=256, background=WandColor(f"rgb{new_color}")) as colorimg:
b = colorimg.make_blob(format="jpeg")
file = discord.File(BytesIO(b), filename="color.jpeg")
embed = discord.Embed(title=f"Approximated color for {color}nm")
embed.set_image(url="attachment://color.jpeg")
embed.set_footer(text="This is not 100% accurate since your monitor and\neyes play a role but this is as close as it can get.\n"
"If the color is black, it is considered invisible.1")
await ctx.send(file=file, embed=embed)
@laser.command(aliases=["diff"])
async def diffraction(self, ctx, *, args=None):
"""
Calculates the wavelength of a laser using a diffraction grating. Run command for more information.
"""
if not args:
embed = discord.Embed(title="Diffraction Grating Equation",
description="This is to calculate the wavelength of a laser using a diffraction grating")
embed.set_image(
url="https://cdn.discordapp.com/attachments/909159696798220400/912064371205738566/kitething5fff.png")
embed.add_field(name="Measurements and information you need",
value="The diffraction grating's slits per mm (L/mm) \n Distance from the diffraction grating to a wall (L) \n Distance between split beams (D) ",
inline=False)
embed.add_field(name="Use the bot for calculations.",
value="You can use this command to do the calculation, for example: `{}laser diffraction lmm=1000 L=6.78 D=11.6`".format(
self.bot.command_prefix))
embed.set_footer(text="This command accepts SI prefixes.")
await ctx.send(embed=embed)
else:
try:
p = parse_input(args)
p = {k.lower(): v for k, v in p.items()}
res = calculate_diffraction(p)
embed = discord.Embed(title="Diffraction Grating Equation")
embed.set_image(
url="https://cdn.discordapp.com/attachments/909159696798220400/912064371205738566/kitething5fff.png")
embed.add_field(name="Values:", value=f"L/mm = {res['Lmm']}\nL = {res['L']}m\nD = {res['D']}m")
embed.add_field(name="Wavelength value:", value="{}m".format(str(res["res"])))
await ctx.send(embed=embed)
except TooFewArgsError:
await util.errormsg(ctx, "Not enough arguments to compute anything.")
return
def setup(bot):
bot.add_cog(LaserCog(bot))
| 45.555024 | 174 | 0.573469 |
f75369582837b000711508efcde4a79ae6b1963d | 12,244 | py | Python | tests/components/test_influxdb.py | mbs-technologie/home-assistant | 71fc446425cbb1c0d4670c261ce8ea3bfd83a73d | [
"MIT"
] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | tests/components/test_influxdb.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | tests/components/test_influxdb.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z | """The tests for the InfluxDB component."""
import unittest
from unittest import mock
import influxdb as influx_client
from homeassistant.bootstrap import setup_component
import homeassistant.components.influxdb as influxdb
from homeassistant.const import EVENT_STATE_CHANGED, STATE_OFF, STATE_ON
from tests.common import get_test_home_assistant
@mock.patch('influxdb.InfluxDBClient')
class TestInfluxDB(unittest.TestCase):
"""Test the InfluxDB component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.handler_method = None
self.hass.bus.listen = mock.Mock()
def tearDown(self):
"""Clear data."""
self.hass.stop()
def test_setup_config_full(self, mock_client):
"""Test the setup with full configuration."""
config = {
'influxdb': {
'host': 'host',
'port': 123,
'database': 'db',
'username': 'user',
'password': 'password',
'ssl': 'False',
'verify_ssl': 'False',
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(
EVENT_STATE_CHANGED, self.hass.bus.listen.call_args_list[0][0][0])
self.assertTrue(mock_client.return_value.query.called)
def test_setup_config_defaults(self, mock_client):
"""Test the setup with default configuration."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(
EVENT_STATE_CHANGED, self.hass.bus.listen.call_args_list[0][0][0])
def test_setup_minimal_config(self, mock_client):
"""Test the setup with minimal configuration."""
config = {
'influxdb': {}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
def test_setup_missing_password(self, mock_client):
"""Test the setup with existing username and missing password."""
config = {
'influxdb': {
'username': 'user'
}
}
assert not setup_component(self.hass, influxdb.DOMAIN, config)
def test_setup_query_fail(self, mock_client):
"""Test the setup for query failures."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
}
}
mock_client.return_value.query.side_effect = \
influx_client.exceptions.InfluxDBClientError('fake')
assert not setup_component(self.hass, influxdb.DOMAIN, config)
def _setup(self):
"""Setup the client."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
'blacklist': ['fake.blacklisted']
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
def test_event_listener(self, mock_client):
"""Test the event listener."""
self._setup()
valid = {
'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo'
}
for in_, out in valid.items():
attrs = {
'unit_of_measurement': 'foobars',
'longitude': '1.1',
'latitude': '2.2'
}
state = mock.MagicMock(
state=in_, domain='fake', object_id='entity', attributes=attrs)
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'foobars',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': out,
'longitude': '1.1',
'latitude': '2.2'
},
}]
self.handler_method(event)
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_no_units(self, mock_client):
"""Test the event listener for missing units."""
self._setup()
for unit in (None, ''):
if unit:
attrs = {'unit_of_measurement': unit}
else:
attrs = {}
state = mock.MagicMock(
state=1, domain='fake', entity_id='entity-id',
object_id='entity', attributes=attrs)
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'entity-id',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_fail_write(self, mock_client):
"""Test the event listener for write failures."""
self._setup()
state = mock.MagicMock(
state=1, domain='fake', entity_id='entity-id', object_id='entity',
attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
mock_client.return_value.write_points.side_effect = \
influx_client.exceptions.InfluxDBClientError('foo')
self.handler_method(event)
def test_event_listener_states(self, mock_client):
"""Test the event listener against ignored states."""
self._setup()
for state_state in (1, 'unknown', '', 'unavailable'):
state = mock.MagicMock(
state=state_state, domain='fake', entity_id='entity-id',
object_id='entity', attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'entity-id',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if state_state == 1:
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_blacklist(self, mock_client):
"""Test the event listener against a blacklist."""
self._setup()
for entity_id in ('ok', 'blacklisted'):
state = mock.MagicMock(
state=1, domain='fake', entity_id='fake.{}'.format(entity_id),
object_id=entity_id, attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'fake.{}'.format(entity_id),
'tags': {
'domain': 'fake',
'entity_id': entity_id,
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if entity_id == 'ok':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_invalid_type(self, mock_client):
"""Test the event listener when an attirbute has an invalid type."""
self._setup()
valid = {
'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo'
}
for in_, out in valid.items():
attrs = {
'unit_of_measurement': 'foobars',
'longitude': '1.1',
'latitude': '2.2',
'invalid_attribute': ['value1', 'value2']
}
state = mock.MagicMock(
state=in_, domain='fake', object_id='entity', attributes=attrs)
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'foobars',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': out,
'longitude': '1.1',
'latitude': '2.2'
},
}]
self.handler_method(event)
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_default_measurement(self, mock_client):
"""Test the event listener with a default measurement."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
'default_measurement': 'state',
'blacklist': ['fake.blacklisted']
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
for entity_id in ('ok', 'blacklisted'):
state = mock.MagicMock(
state=1, domain='fake', entity_id='fake.{}'.format(entity_id),
object_id=entity_id, attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'state',
'tags': {
'domain': 'fake',
'entity_id': entity_id,
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if entity_id == 'ok':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
| 35.387283 | 79 | 0.501225 |
f75385538ed3357b98f19d7d208efa4e98e8bebc | 34,012 | py | Python | brainbox/plot.py | int-brain-lab/ibllib | 93be6b98848758e05cdc9398caaf19e6a68f7386 | [
"MIT"
] | 38 | 2018-08-07T21:55:29.000Z | 2022-03-21T14:49:03.000Z | brainbox/plot.py | int-brain-lab/ibllib | 93be6b98848758e05cdc9398caaf19e6a68f7386 | [
"MIT"
] | 207 | 2018-07-25T15:10:48.000Z | 2022-03-08T13:23:08.000Z | brainbox/plot.py | int-brain-lab/ibllib | 93be6b98848758e05cdc9398caaf19e6a68f7386 | [
"MIT"
] | 35 | 2018-09-04T14:49:56.000Z | 2022-01-06T21:17:51.000Z | """
Plots metrics that assess quality of single units. Some functions here generate plots for the
output of functions in the brainbox `single_units.py` module.
Run the following to set-up the workspace to run the docstring examples:
>>> from brainbox import processing
>>> import alf.io as aio
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import ibllib.ephys.spikes as e_spks
# (*Note, if there is no 'alf' directory, make 'alf' directory from 'ks2' output directory):
>>> e_spks.ks2_to_alf(path_to_ks_out, path_to_alf_out)
# Load the alf spikes bunch and clusters bunch, and get a units bunch.
>>> spks_b = aio.load_object(path_to_alf_out, 'spikes')
>>> clstrs_b = aio.load_object(path_to_alf_out, 'clusters')
>>> units_b = processing.get_units_bunch(spks_b) # may take a few mins to compute
"""
import time
from warnings import warn
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# from matplotlib.ticker import StrMethodFormatter
from brainbox import singlecell
from brainbox.metrics import single_units
from brainbox.processing import bincount2D
from brainbox.io.spikeglx import extract_waveforms
from ibllib.io import spikeglx
def feat_vars(units_b, units=None, feat_name='amps', dist='norm', test='ks', cmap_name='coolwarm',
ax=None):
'''
Plots the coefficients of variation of a particular spike feature for all units as a bar plot,
where each bar is color-coded corresponding to the depth of the max amplitude channel of the
respective unit.
Parameters
----------
units_b : bunch
A units bunch containing fields with spike information (e.g. cluster IDs, times, features,
etc.) for all units.
units : array-like (optional)
A subset of all units for which to create the bar plot. (If `None`, all units are used)
feat_name : string (optional)
The spike feature to plot.
dist : string (optional)
The type of hypothetical null distribution from which the empirical spike feature
distributions are presumed to belong to.
test : string (optional)
The statistical test used to calculate the probability that the empirical spike feature
distributions come from `dist`.
cmap_name : string (optional)
The name of the colormap associated with the plot.
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
Returns
-------
cv_vals : ndarray
The coefficients of variation of `feat_name` for each unit.
p_vals : ndarray
The probabilites that the distribution for `feat_name` for each unit comes from a
`dist` distribution based on the `test` statistical test.
See Also
--------
metrics.unit_stability
Examples
--------
1) Create a bar plot of the coefficients of variation of the spike amplitudes for all units.
>>> fig, var_vals, p_vals = bb.plot.feat_vars(units_b)
'''
# Get units.
if not (units is None): # we're using a subset of all units
unit_list = list(units_b['depths'].keys())
# For each unit in `unit_list`, remove unit from `units_b` if not in `units`.
[units_b['depths'].pop(unit) for unit in unit_list if not (int(unit) in units)]
unit_list = list(units_b['depths'].keys()) # get new `unit_list` after removing unit
# Calculate coefficients of variation for all units
p_vals_b, cv_b = single_units.unit_stability(
units_b, units=units, feat_names=[feat_name], dist=dist, test=test)
cv_vals = np.array(tuple(cv_b[feat_name].values()))
cv_vals = cv_vals * 1e6 if feat_name == 'amps' else cv_vals # convert to uV if amps
p_vals = np.array(tuple(p_vals_b[feat_name].values()))
# Remove any empty units. This must be done AFTER the above calculations for ALL units so that
# we can keep direct indexing.
empty_unit_idxs = np.where([len(units_b['times'][unit]) == 0 for unit in unit_list])[0]
good_units = [unit for unit in unit_list if unit not in empty_unit_idxs.astype(str)]
# Get mean depths of spikes for good units
depths = np.asarray([np.mean(units_b['depths'][str(unit)]) for unit in good_units])
# Create unit normalized colormap based on `depths`, sorted by depth.
cmap = plt.cm.get_cmap(cmap_name)
depths_norm = depths / np.max(depths)
rgba = np.asarray([cmap(depth) for depth in np.sort(np.flip(depths_norm))])
# Plot depth-color-coded h bar plot of CVs for `feature` for each unit, where units are
# sorted descendingly by depth along y-axis.
if ax is None:
fig, ax = plt.subplots()
ax.barh(y=[int(unit) for unit in good_units], width=cv_vals[np.argsort(depths)], color=rgba)
fig = ax.figure
cbar = fig.colorbar(plt.cm.ScalarMappable(cmap=cmap), ax=ax)
max_d = np.max(depths)
tick_labels = [int(max_d * tick) for tick in (0, 0.2, 0.4, 0.6, 0.8, 1.0)]
cbar.set_ticks(cbar.get_ticks()) # must call `set_ticks` to call `set_ticklabels`
cbar.set_ticklabels(tick_labels)
ax.set_title('CV of {feat}'.format(feat=feat_name))
ax.set_ylabel('Unit Number (sorted by depth)')
ax.set_xlabel('CV')
cbar.set_label('Depth', rotation=-90)
return cv_vals, p_vals
def missed_spikes_est(feat, feat_name, spks_per_bin=20, sigma=5, min_num_bins=50, ax=None):
'''
Plots the pdf of an estimated symmetric spike feature distribution, with a vertical cutoff line
that indicates the approximate fraction of spikes missing from the distribution, assuming the
true distribution is symmetric.
Parameters
----------
feat : ndarray
The spikes' feature values.
feat_name : string
The spike feature to plot.
spks_per_bin : int (optional)
The number of spikes per bin from which to compute the spike feature histogram.
sigma : int (optional)
The standard deviation for the gaussian kernel used to compute the pdf from the spike
feature histogram.
min_num_bins : int (optional)
The minimum number of bins used to compute the spike feature histogram.
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
Returns
-------
fraction_missing : float
The fraction of missing spikes (0-0.5). *Note: If more than 50% of spikes are missing, an
accurate estimate isn't possible.
See Also
--------
single_units.missed_spikes_est
Examples
--------
1) Plot cutoff line indicating the fraction of spikes missing from a unit based on the recorded
unit's spike amplitudes, assuming the distribution of the unit's spike amplitudes is symmetric.
>>> feat = units_b['amps']['1']
>>> fraction_missing = bb.plot.missed_spikes_est(feat, feat_name='amps', unit=1)
'''
# Calculate the feature distribution histogram and fraction of spikes missing.
fraction_missing, pdf, cutoff_idx = \
single_units.missed_spikes_est(feat, spks_per_bin, sigma, min_num_bins)
# Plot.
if ax is None: # create two axes
fig, ax = plt.subplots(nrows=1, ncols=2)
if ax is None or len(ax) == 2: # plot histogram and pdf on two separate axes
num_bins = int(feat.size / spks_per_bin)
ax[0].hist(feat, bins=num_bins)
ax[0].set_xlabel('{0}'.format(feat_name))
ax[0].set_ylabel('Count')
ax[0].set_title('Histogram of {0}'.format(feat_name))
ax[1].plot(pdf)
ax[1].vlines(cutoff_idx, 0, np.max(pdf), colors='r')
ax[1].set_xlabel('Bin Number')
ax[1].set_ylabel('Density')
ax[1].set_title('PDF Symmetry Cutoff\n'
'(estimated {:.2f}% missing spikes)'.format(fraction_missing * 100))
else: # just plot pdf
ax = ax[0]
ax.plot(pdf)
ax.vlines(cutoff_idx, 0, np.max(pdf), colors='r')
ax.set_xlabel('Bin Number')
ax.set_ylabel('Density')
ax.set_title('PDF Symmetry Cutoff\n'
'(estimated {:.2f}% missing spikes)'.format(fraction_missing * 100))
return fraction_missing
def wf_comp(ephys_file, ts1, ts2, ch, sr=30000, n_ch_probe=385, dtype='int16', car=True,
col=['b', 'r'], ax=None):
'''
Plots two different sets of waveforms across specified channels after (optionally)
common-average-referencing. In this way, waveforms can be compared to see if there is,
e.g. drift during the recording, or if two units should be merged, or one unit should be split.
Parameters
----------
ephys_file : string
The file path to the binary ephys data.
ts1 : array_like
A set of timestamps for which to compare waveforms with `ts2`.
ts2: array_like
A set of timestamps for which to compare waveforms with `ts1`.
ch : array-like
The channels to use for extracting and plotting the waveforms.
sr : int (optional)
The sampling rate (in hz) that the ephys data was acquired at.
n_ch_probe : int (optional)
The number of channels of the recording.
dtype: str (optional)
The datatype represented by the bytes in `ephys_file`.
car: bool (optional)
A flag for whether or not to perform common-average-referencing before extracting waveforms
col: list of strings or float arrays (optional)
Two elements in the list, where each specifies the color the `ts1` and `ts2` waveforms
will be plotted in, respectively.
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
Returns
-------
wf1 : ndarray
The waveforms for the spikes in `ts1`: an array of shape (#spikes, #samples, #channels).
wf2 : ndarray
The waveforms for the spikes in `ts2`: an array of shape (#spikes, #samples, #channels).
s : float
The similarity score between the two sets of waveforms, calculated by
`single_units.wf_similarity`
See Also
--------
io.extract_waveforms
single_units.wf_similarity
Examples
--------
1) Compare first and last 100 spike waveforms for unit1, across 20 channels around the channel
of max amplitude, and compare the waveforms in the first minute to the waveforms in the fourth
minute for unit2, across 10 channels around the channel of max amplitude.
# Get first and last 100 spikes, and 20 channels around channel of max amp for unit 1:
>>> ts1 = units_b['times']['1'][:100]
>>> ts2 = units_b['times']['1'][-100:]
>>> max_ch = clstrs_b['channels'][1]
>>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.
>>> ch = np.arange(max_ch, max_ch + 20)
>>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.
>>> ch = np.arange(max_ch - 20, max_ch)
>>> else: # take `n_c_ch` around `max_ch`.
>>> ch = np.arange(max_ch - 10, max_ch + 10)
>>> wf1, wf2, s = bb.plot.wf_comp(path_to_ephys_file, ts1, ts2, ch)
# Plot waveforms for unit2 from the first and fourth minutes across 10 channels.
>>> ts = units_b['times']['2']
>>> ts1_2 = ts[np.where(ts<60)[0]]
>>> ts2_2 = ts[np.where(ts>180)[0][:len(ts1)]]
>>> max_ch = clstrs_b['channels'][2]
>>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.
>>> ch = np.arange(max_ch, max_ch + 10)
>>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.
>>> ch = np.arange(max_ch - 10, max_ch)
>>> else: # take `n_c_ch` around `max_ch`.
>>> ch = np.arange(max_ch - 5, max_ch + 5)
>>> wf1_2, wf2_2, s_2 = bb.plot.wf_comp(path_to_ephys_file, ts1_2, ts2_2, ch)
'''
# Ensure `ch` is ndarray
ch = np.asarray(ch)
ch = ch.reshape((ch.size, 1)) if ch.size == 1 else ch
# Extract the waveforms for these timestamps and compute similarity score.
wf1 = extract_waveforms(ephys_file, ts1, ch, sr=sr, n_ch_probe=n_ch_probe, dtype=dtype,
car=car)
wf2 = extract_waveforms(ephys_file, ts2, ch, sr=sr, n_ch_probe=n_ch_probe, dtype=dtype,
car=car)
s = single_units.wf_similarity(wf1, wf2)
# Plot these waveforms against each other.
n_ch = ch.size
if ax is None:
fig, ax = plt.subplots(nrows=n_ch, ncols=2) # left col is all waveforms, right col is mean
for cur_ax, cur_ch in enumerate(ch):
ax[cur_ax][0].plot(wf1[:, :, cur_ax].T, c=col[0])
ax[cur_ax][0].plot(wf2[:, :, cur_ax].T, c=col[1])
ax[cur_ax][1].plot(np.mean(wf1[:, :, cur_ax], axis=0), c=col[0])
ax[cur_ax][1].plot(np.mean(wf2[:, :, cur_ax], axis=0), c=col[1])
ax[cur_ax][0].set_ylabel('Ch {0}'.format(cur_ch))
ax[0][0].set_title('All Waveforms. S = {:.2f}'.format(s))
ax[0][1].set_title('Mean Waveforms')
plt.legend(['1st spike set', '2nd spike set'])
return wf1, wf2, s
def amp_heatmap(ephys_file, ts, ch, sr=30000, n_ch_probe=385, dtype='int16', cmap_name='RdBu',
car=True, ax=None):
'''
Plots a heatmap of the normalized voltage values over time and space for given timestamps and
channels, after (optionally) common-average-referencing.
Parameters
----------
ephys_file : string
The file path to the binary ephys data.
ts: array_like
A set of timestamps for which to get the voltage values.
ch : array-like
The channels to use for extracting the voltage values.
sr : int (optional)
The sampling rate (in hz) that the ephys data was acquired at.
n_ch_probe : int (optional)
The number of channels of the recording.
dtype: str (optional)
The datatype represented by the bytes in `ephys_file`.
cmap_name : string (optional)
The name of the colormap associated with the plot.
car: bool (optional)
A flag for whether or not to perform common-average-referencing before extracting waveforms
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
Returns
-------
v_vals : ndarray
The voltage values.
Examples
--------
1) Plot a heatmap of the spike amplitudes across 20 channels around the channel of max
amplitude for all spikes in unit 1.
>>> ts = units_b['times']['1']
>>> max_ch = clstrs_b['channels'][1]
>>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.
>>> ch = np.arange(max_ch, max_ch + 20)
>>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.
>>> ch = np.arange(max_ch - 20, max_ch)
>>> else: # take `n_c_ch` around `max_ch`.
>>> ch = np.arange(max_ch - 10, max_ch + 10)
>>> bb.plot.amp_heatmap(path_to_ephys_file, ts, ch)
'''
# Ensure `ch` is ndarray
ch = np.asarray(ch)
ch = ch.reshape((ch.size, 1)) if ch.size == 1 else ch
# Get memmapped array of `ephys_file`
s_reader = spikeglx.Reader(ephys_file, open=True)
file_m = s_reader.data
# Get voltage values for each peak amplitude sample for `ch`.
max_amp_samples = (ts * sr).astype(int)
# Currently this is an annoying way to calculate `v_vals` b/c indexing with multiple values
# is currently unsupported.
v_vals = np.zeros((max_amp_samples.size, ch.size))
for sample in range(max_amp_samples.size):
v_vals[sample] = file_m[max_amp_samples[sample]:max_amp_samples[sample] + 1, ch]
if car: # compute spatial noise in chunks, and subtract from `v_vals`.
# Get subset of time (from first to last max amp sample)
n_chunk_samples = 5e6 # number of samples per chunk
n_chunks = np.ceil((max_amp_samples[-1] - max_amp_samples[0]) /
n_chunk_samples).astype('int')
# Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the
# samples that make up the first chunk.
chunk_sample = np.arange(max_amp_samples[0], max_amp_samples[-1], n_chunk_samples,
dtype=int)
chunk_sample = np.append(chunk_sample, max_amp_samples[-1])
noise_s_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16) # spatial noise array
# Give time estimate for computing `noise_s_chunks`.
t0 = time.perf_counter()
np.median(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)
dt = time.perf_counter() - t0
print('Performing spatial CAR before waveform extraction. Estimated time is {:.2f} mins.'
' ({})'.format(dt * n_chunks / 60, time.ctime()))
# Compute noise for each chunk, then take the median noise of all chunks.
for chunk in range(n_chunks):
noise_s_chunks[chunk, :] = np.median(
file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0)
noise_s = np.median(noise_s_chunks, axis=0)
v_vals -= noise_s[None, :]
print('Done. ({})'.format(time.ctime()))
s_reader.close()
# Plot heatmap.
if ax is None:
fig, ax = plt.subplots()
v_vals_norm = (v_vals / np.max(abs(v_vals))).T
cbar_map = ax.imshow(v_vals_norm, cmap=cmap_name, aspect='auto',
extent=[ts[0], ts[-1], ch[0], ch[-1]], origin='lower')
ax.set_yticks(np.arange(ch[0], ch[-1], 5))
ax.set_ylabel('Channel Numbers')
ax.set_xlabel('Time (s)')
ax.set_title('Voltage Heatmap')
fig = ax.figure
cbar = fig.colorbar(cbar_map, ax=ax)
cbar.set_label('V', rotation=-90)
return v_vals
def firing_rate(ts, hist_win=0.01, fr_win=0.5, n_bins=10, show_fr_cv=True, ax=None):
'''
Plots the instantaneous firing rate for the given spike timestamps over time, and optionally
overlays the value of the coefficient of variation of the firing rate for a specified number
of bins.
Parameters
----------
ts : ndarray
The spike timestamps from which to compute the firing rate.
hist_win : float (optional)
The time window (in s) to use for computing spike counts.
fr_win : float (optional)
The time window (in s) to use as a moving slider to compute the instantaneous firing rate.
n_bins : int (optional)
The number of bins in which to compute coefficients of variation of the firing rate.
show_fr_cv : bool (optional)
A flag for whether or not to compute and show the coefficients of variation of the firing
rate for `n_bins`.
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
Returns
-------
fr: ndarray
The instantaneous firing rate over time (in hz).
cv: float
The mean coefficient of variation of the firing rate of the `n_bins` number of coefficients
computed. Can only be returned if `show_fr_cv` is True.
cvs: ndarray
The coefficients of variation of the firing for each bin of `n_bins`. Can only be returned
if `show_fr_cv` is True.
See Also
--------
single_units.firing_rate_coeff_var
singlecell.firing_rate
Examples
--------
1) Plot the firing rate for unit 1 from the time of its first to last spike, showing the cv
of the firing rate for 10 evenly spaced bins.
>>> ts = units_b['times']['1']
>>> fr, cv, cvs = bb.plot.firing_rate(ts)
'''
if ax is None:
fig, ax = plt.subplots()
if not (show_fr_cv): # compute just the firing rate
fr = singlecell.firing_rate(ts, hist_win=hist_win, fr_win=fr_win)
else: # compute firing rate and coefficients of variation
cv, cvs, fr = single_units.firing_rate_coeff_var(ts, hist_win=hist_win, fr_win=fr_win,
n_bins=n_bins)
x = np.arange(fr.size) * hist_win
ax.plot(x, fr)
ax.set_title('Firing Rate')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Rate (s$^-1$)')
if not (show_fr_cv):
return fr
else: # show coefficients of variation
y_max = np.max(fr) * 1.05
x_l = x[int(x.size / n_bins)]
# Plot vertical lines separating plots into `n_bins`.
[ax.vlines((x_l * i), 0, y_max, linestyles='dashed', linewidth=2)
for i in range(1, n_bins)]
# Plot text with cv of firing rate for each bin.
[ax.text(x_l * (i + 1), y_max, 'cv={0:.2f}'.format(cvs[i]), fontsize=9, ha='right')
for i in range(n_bins)]
return fr, cv, cvs
def peri_event_time_histogram(
spike_times, spike_clusters, events, cluster_id, # Everything you need for a basic plot
t_before=0.2, t_after=0.5, bin_size=0.025, smoothing=0.025, as_rate=True,
include_raster=False, n_rasters=None, error_bars='std', ax=None,
pethline_kwargs={'color': 'blue', 'lw': 2},
errbar_kwargs={'color': 'blue', 'alpha': 0.5},
eventline_kwargs={'color': 'black', 'alpha': 0.5},
raster_kwargs={'color': 'black', 'lw': 0.5}, **kwargs):
"""
Plot peri-event time histograms, with the mean firing rate of units centered on a given
series of events. Can optionally add a raster underneath the PETH plot of individual spike
trains about the events.
Parameters
----------
spike_times : array_like
Spike times (in seconds)
spike_clusters : array-like
Cluster identities for each element of spikes
events : array-like
Times to align the histogram(s) to
cluster_id : int
Identity of the cluster for which to plot a PETH
t_before : float, optional
Time before event to plot (default: 0.2s)
t_after : float, optional
Time after event to plot (default: 0.5s)
bin_size : float, optional
Width of bin for histograms (default: 0.025s)
smoothing : float, optional
Sigma of gaussian smoothing to use in histograms. (default: 0.025s)
as_rate : bool, optional
Whether to use spike counts or rates in the plot (default: `True`, uses rates)
include_raster : bool, optional
Whether to put a raster below the PETH of individual spike trains (default: `False`)
n_rasters : int, optional
If include_raster is True, the number of rasters to include. If `None`
will default to plotting rasters around all provided events. (default: `None`)
error_bars : {'std', 'sem', 'none'}, optional
Defines which type of error bars to plot. Options are:
-- `'std'` for 1 standard deviation
-- `'sem'` for standard error of the mean
-- `'none'` for only plotting the mean value
(default: `'std'`)
ax : matplotlib axes, optional
If passed, the function will plot on the passed axes. Note: current
behavior causes whatever was on the axes to be cleared before plotting!
(default: `None`)
pethline_kwargs : dict, optional
Dict containing line properties to define PETH plot line. Default
is a blue line with weight of 2. Needs to have color. See matplotlib plot documentation
for more options.
(default: `{'color': 'blue', 'lw': 2}`)
errbar_kwargs : dict, optional
Dict containing fill-between properties to define PETH error bars.
Default is a blue fill with 50 percent opacity. Needs to have color. See matplotlib
fill_between documentation for more options.
(default: `{'color': 'blue', 'alpha': 0.5}`)
eventline_kwargs : dict, optional
Dict containing fill-between properties to define line at event.
Default is a black line with 50 percent opacity. Needs to have color. See matplotlib
vlines documentation for more options.
(default: `{'color': 'black', 'alpha': 0.5}`)
raster_kwargs : dict, optional
Dict containing properties defining lines in the raster plot.
Default is black lines with line width of 0.5. See matplotlib vlines for more options.
(default: `{'color': 'black', 'lw': 0.5}`)
Returns
-------
ax : matplotlib axes
Axes with all of the plots requested.
"""
# Check to make sure if we fail, we fail in an informative way
if not len(spike_times) == len(spike_clusters):
raise ValueError('Spike times and clusters are not of the same shape')
if len(events) == 1:
raise ValueError('Cannot make a PETH with only one event.')
if error_bars not in ('std', 'sem', 'none'):
raise ValueError('Invalid error bar type was passed.')
if not all(np.isfinite(events)):
raise ValueError('There are NaN or inf values in the list of events passed. '
' Please remove non-finite data points and try again.')
# Compute peths
peths, binned_spikes = singlecell.calculate_peths(spike_times, spike_clusters, [cluster_id],
events, t_before, t_after, bin_size,
smoothing, as_rate)
# Construct an axis object if none passed
if ax is None:
plt.figure()
ax = plt.gca()
# Plot the curve and add error bars
mean = peths.means[0, :]
ax.plot(peths.tscale, mean, **pethline_kwargs)
if error_bars == 'std':
bars = peths.stds[0, :]
elif error_bars == 'sem':
bars = peths.stds[0, :] / np.sqrt(len(events))
else:
bars = np.zeros_like(mean)
if error_bars != 'none':
ax.fill_between(peths.tscale, mean - bars, mean + bars, **errbar_kwargs)
# Plot the event marker line. Extends to 5% higher than max value of means plus any error bar.
plot_edge = (mean.max() + bars[mean.argmax()]) * 1.05
ax.vlines(0., 0., plot_edge, **eventline_kwargs)
# Set the limits on the axes to t_before and t_after. Either set the ylim to the 0 and max
# values of the PETH, or if we want to plot a spike raster below, create an equal amount of
# blank space below the zero where the raster will go.
ax.set_xlim([-t_before, t_after])
ax.set_ylim([-plot_edge if include_raster else 0., plot_edge])
# Put y ticks only at min, max, and zero
if mean.min() != 0:
ax.set_yticks([0, mean.min(), mean.max()])
else:
ax.set_yticks([0., mean.max()])
# Move the x axis line from the bottom of the plotting space to zero if including a raster,
# Then plot the raster
if include_raster:
if n_rasters is None:
n_rasters = len(events)
if n_rasters > 60:
warn("Number of raster traces is greater than 60. This might look bad on the plot.")
ax.axhline(0., color='black')
tickheight = plot_edge / len(events[:n_rasters]) # How much space per trace
tickedges = np.arange(0., -plot_edge - 1e-5, -tickheight)
clu_spks = spike_times[spike_clusters == cluster_id]
for i, t in enumerate(events[:n_rasters]):
idx = np.bitwise_and(clu_spks >= t - t_before, clu_spks <= t + t_after)
event_spks = clu_spks[idx]
ax.vlines(event_spks - t, tickedges[i + 1], tickedges[i], **raster_kwargs)
ax.set_ylabel('Firing Rate' if as_rate else 'Number of spikes', y=0.75)
else:
ax.set_ylabel('Firing Rate' if as_rate else 'Number of spikes')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Time (s) after event')
return ax
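# --- Added usage sketch (not from the original ibllib source) ---------------
# `peri_event_time_histogram` has no Examples section in its docstring, so here
# is a minimal, hedged sketch. It assumes the `spks_b` bunch from the module
# docstring is loaded and that `events` is a hypothetical array of alignment
# times in seconds (e.g. stimulus onsets); both names are assumptions, not part
# of the original file.
# >>> events = np.array([12.3, 45.6, 78.9])  # hypothetical alignment times (s)
# >>> ax = peri_event_time_histogram(spks_b['times'], spks_b['clusters'], events,
# ...                                cluster_id=1, t_before=0.4, t_after=1.0,
# ...                                include_raster=True, n_rasters=50)
# >>> plt.show()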
def driftmap(ts, feat, ax=None, plot_style='bincount',
t_bin=0.01, d_bin=20, weights=None, vmax=None, **kwargs):
"""
Plots the values of a spike feature array (y-axis) over time (x-axis).
Two arguments can be given for the plot_style of the drift map:
- 'scatter' : whereby each value is plotted as a marker (up to 100,000 data points)
- 'bincount' : whereby the values are binned (optimised to represent spike raster)
Parameters
----------
feat : ndarray
The spikes' feature values.
ts : ndarray
The spike timestamps from which to compute the firing rate.
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
t_bin: time bin used when plot_style='bincount'
d_bin: depth bin used when plot_style='bincount'
plot_style: 'scatter', 'bincount'
**kwargs: matplotlib.imshow arguments
Returns
-------
cd: float
The cumulative drift of `feat`.
md: float
The maximum drift of `feat`.
See Also
--------
metrics.cum_drift
metrics.max_drift
Examples
--------
1) Plot the amplitude driftmap for unit 1.
>>> ts = units_b['times']['1']
>>> amps = units_b['amps']['1']
>>> ax = bb.plot.driftmap(ts, amps)
2) Plot the depth driftmap for unit 1.
>>> ts = units_b['times']['1']
>>> depths = units_b['depths']['1']
>>> ax = bb.plot.driftmap(ts, depths)
"""
iok = ~np.isnan(feat)
if ax is None:
fig, ax = plt.subplots()
if plot_style == 'scatter' and len(ts) < 100000:
print('here todo')
if 'color' not in kwargs.keys():
kwargs['color'] = 'k'
ax.plot(ts, feat, **kwargs)
else:
# compute raster map as a function of site depth
R, times, depths = bincount2D(
ts[iok], feat[iok], t_bin, d_bin, weights=weights)
# plot raster map
ax.imshow(R, aspect='auto', cmap='binary', vmin=0, vmax=vmax or np.std(R) * 4,
extent=np.r_[times[[0, -1]], depths[[0, -1]]], origin='lower', **kwargs)
ax.set_xlabel('time (secs)')
ax.set_ylabel('depth (um)')
return ax
def pres_ratio(ts, hist_win=10, ax=None):
'''
Plots the presence ratio of spike counts: the number of bins where there is at least one
spike, over the total number of bins, given a specified bin width.
Parameters
----------
ts : ndarray
The spike timestamps from which to compute the presence ratio.
hist_win : float
The time window (in s) to use for computing the presence ratio.
ax : axessubplot (optional)
The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)
Returns
-------
pr : float
The presence ratio.
spks_bins : ndarray
The number of spks in each bin.
See Also
--------
single_units.pres_ratio
Examples
--------
1) Plot the presence ratio for unit 1, given a window of 10 s.
>>> ts = units_b['times']['1']
>>> pr, pr_bins = bb.plot.pres_ratio(ts)
'''
pr, spks_bins = single_units.pres_ratio(ts, hist_win)
pr_bins = np.where(spks_bins > 0, 1, 0)
if ax is None:
fig, ax = plt.subplots()
ax.plot(pr_bins)
ax.set_xlabel('Bin Number (width={:.1f}s)'.format(hist_win))
ax.set_ylabel('Presence')
ax.set_title('Presence Ratio')
return pr, spks_bins
def driftmap_color(
clusters_depths, spikes_times,
spikes_amps, spikes_depths, spikes_clusters,
ax=None, axesoff=False, return_lims=False):
'''
Plots the driftmap of a session or a trial
The plot shows the spike times vs spike depths.
Each dot is a spike, whose color indicates the cluster
and opacity indicates the spike amplitude.
Parameters
-------------
clusters_depths: ndarray
depths of all clusters
spikes_times: ndarray
spike times of all clusters
spikes_amps: ndarray
amplitude of each spike
spikes_depths: ndarray
depth of each spike
spikes_clusters: ndarray
cluster idx of each spike
ax: matplotlib.axes.Axes object (optional)
The axis object to plot the driftmap on
(if `None`, a new figure and axis is created)
Returns
-------
ax: matplotlib.axes.Axes object
The axis object with driftmap plotted
x_lim: list of two elements
range of x axis
y_lim: list of two elements
range of y axis
'''
color_bins = sns.color_palette("hls", 500)
new_color_bins = np.vstack(
np.transpose(np.reshape(color_bins, [5, 100, 3]), [1, 0, 2]))
# get the sorted idx of each depth, and create colors based on the idx
sorted_idx = np.argsort(np.argsort(clusters_depths))
colors = np.vstack(
[np.repeat(
new_color_bins[np.mod(idx, 500), :][np.newaxis, ...],
n_spikes, axis=0)
for (idx, n_spikes) in
zip(sorted_idx, np.unique(spikes_clusters,
return_counts=True)[1])])
max_amp = np.percentile(spikes_amps, 90)
min_amp = np.percentile(spikes_amps, 10)
opacity = np.divide(spikes_amps - min_amp, max_amp - min_amp)
opacity[opacity > 1] = 1
opacity[opacity < 0] = 0
colorvec = np.zeros([len(opacity), 4], dtype='float16')
colorvec[:, 3] = opacity.astype('float16')
colorvec[:, 0:3] = colors.astype('float16')
x = spikes_times.astype('float32')
y = spikes_depths.astype('float32')
args = dict(color=colorvec, edgecolors='none')
savefig = False  # only save to file when the figure is created inside this function
if ax is None:
fig = plt.Figure(dpi=200, frameon=False, figsize=[10, 10])
ax = plt.Axes(fig, [0.1, 0.1, 0.9, 0.9])
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Distance from the probe tip (um)')
savefig = True
args.update(s=0.1)
ax.scatter(x, y, **args)
x_edge = (max(x) - min(x)) * 0.05
x_lim = [min(x) - x_edge, max(x) + x_edge]
y_lim = [min(y) - 50, max(y) + 100]
ax.set_xlim(x_lim[0], x_lim[1])
ax.set_ylim(y_lim[0], y_lim[1])
if axesoff:
ax.axis('off')
if savefig:
fig.add_axes(ax)
fig.savefig('driftmap.png')
if return_lims:
return ax, x_lim, y_lim
else:
return ax
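# --- Added usage sketch (not from the original ibllib source) ---------------
# `driftmap_color` also lacks an Examples section. A minimal, hedged sketch,
# assuming the `spks_b` and `clstrs_b` bunches from the module docstring are
# loaded and expose 'times', 'amps', 'depths', 'clusters' (spikes) and 'depths'
# (clusters) fields; these field names are assumptions about the ALF objects.
# >>> ax = driftmap_color(clstrs_b['depths'], spks_b['times'], spks_b['amps'],
# ...                     spks_b['depths'], spks_b['clusters'])
# Note that when `ax` is None the function builds its own figure and saves
# 'driftmap.png' in the current working directory.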
| 41.176755 | 99 | 0.635599 |
f753a5080fc68a40a6ef32fd98ddc29a3a9f83b6 | 2,339 | py | Python | src/using_multiprocessing.py | ariannasg/optimizing-python | e6c307dc694bc98c776faea1dbd7f420c2928f64 | [
"MIT"
] | null | null | null | src/using_multiprocessing.py | ariannasg/optimizing-python | e6c307dc694bc98c776faea1dbd7f420c2928f64 | [
"MIT"
] | null | null | null | src/using_multiprocessing.py | ariannasg/optimizing-python | e6c307dc694bc98c776faea1dbd7f420c2928f64 | [
"MIT"
] | null | null | null | #!usr/bin/env python3
import bz2
from concurrent.futures import ProcessPoolExecutor
def unpack(requests):
"""Unpack a list of requests compressed in bz2"""
return [bz2.decompress(request) for request in requests]
def unpack_proc(requests):
"""Unpack a list of requests compressed in bz2 using a process pool"""
# Default to numbers of cores
with ProcessPoolExecutor() as pool:
return list(pool.map(bz2.decompress, requests))
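# Editor's note (not part of the original file): ProcessPoolExecutor defaults to
# os.cpu_count() workers. A hedged sketch of pinning the pool size explicitly,
# e.g. to leave one core free for the rest of the system (`os` import assumed):
# >>> import os
# >>> with ProcessPoolExecutor(max_workers=max(1, os.cpu_count() - 1)) as pool:
# ...     results = list(pool.map(bz2.decompress, requests))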
if __name__ == '__main__':
with open('src/huck-finn.txt', 'rb') as fp:
data = fp.read()
bz2data = bz2.compress(data)
print(len(bz2data) / len(data)) # About 27%
print(bz2.decompress(bz2data) == data)  # Lossless
requests = [bz2data] * 300
# Note the way we're using %time here: the `_ =` is so we discard the
# output.
# Seeing that the CPU time and the Wall time were very close told us that
# this unpack is a CPU-bound operation, so we could optimise it by using
# multiprocessing. The difference in performance is almost a factor of two:
# from 9.76 s to 5.47 s -> almost 2 times faster.
# In [5]: %run src/using_multiprocessing.py
# 0.27078187856861546
# True
#
# In [6]: %time _ = unpack(requests)
# CPU times: user 9.37 s, sys: 244 ms, total: 9.61 s
# Wall time: 9.76 s
#
# In [7]: %prun -l 10 unpack(requests)
# 1205 function calls in 9.544 seconds
#
# Ordered by: internal time
#
# ncalls tottime percall cumtime percall filename:lineno(function)
# 300 9.521 0.032 9.521 0.032 {method 'decompress' of '_bz2.BZ2Decompressor' objects}
# 1 0.014 0.014 9.543 9.543 <string>:1(<module>)
# 1 0.004 0.004 9.530 9.530 using_multiprocessing.py:9(<listcomp>)
# 300 0.004 0.000 9.526 0.032 bz2.py:341(decompress)
# 300 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
# 300 0.000 0.000 0.000 0.000 {method 'join' of 'bytes' objects}
# 1 0.000 0.000 9.544 9.544 {built-in method builtins.exec}
# 1 0.000 0.000 9.530 9.530 using_multiprocessing.py:7(unpack)
# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
#
# In [8]: %time _ = unpack_proc(requests)
# CPU times: user 436 ms, sys: 423 ms, total: 859 ms
# Wall time: 5.47 s
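# Editor's note (not part of the original file): the measurements above were
# taken interactively with IPython's %time/%prun magics. A minimal, hedged
# sketch of the same comparison in a plain Python session, standard library
# only, kept as comments so the script's behaviour is unchanged:
# >>> import time
# >>> for fn in (unpack, unpack_proc):
# ...     t0 = time.perf_counter()
# ...     fn(requests)
# ...     print(fn.__name__, round(time.perf_counter() - t0, 2), 's')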
| 37.725806 | 103 | 0.637452 |
f753c0699ca176986a73a60b3e467af2e45d2c75 | 34,012 | py | Python | gdata/apps/emailsettings/data.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | gdata/apps/emailsettings/data.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | gdata/apps/emailsettings/data.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for the Email Settings API."""
__author__ = 'Claudio Cherubino <ccherubino@google.com>'
import atom.data
import gdata.apps
import gdata.apps_property
import gdata.data
# This is required to work around a naming conflict between the Google
# Spreadsheets API and Python's built-in property function
pyproperty = property
# The apps:property label of the label property
LABEL_NAME = 'label'
# The apps:property from of the filter property
FILTER_FROM_NAME = 'from'
# The apps:property to of the filter property
FILTER_TO_NAME = 'to'
# The apps:property subject of the filter property
FILTER_SUBJECT_NAME = 'subject'
# The apps:property hasTheWord of the filter property
FILTER_HAS_THE_WORD_NAME = 'hasTheWord'
# The apps:property doesNotHaveTheWord of the filter property
FILTER_DOES_NOT_HAVE_THE_WORD_NAME = 'doesNotHaveTheWord'
# The apps:property hasAttachment of the filter property
FILTER_HAS_ATTACHMENTS_NAME = 'hasAttachment'
# The apps:property label of the filter action property
FILTER_LABEL = 'label'
# The apps:property shouldMarkAsRead of the filter action property
FILTER_MARK_AS_READ = 'shouldMarkAsRead'
# The apps:property shouldArchive of the filter action property
FILTER_ARCHIVE = 'shouldArchive'
# The apps:property name of the send-as alias property
SENDAS_ALIAS_NAME = 'name'
# The apps:property address of the send-as alias property
SENDAS_ALIAS_ADDRESS = 'address'
# The apps:property replyTo of the send-as alias property
SENDAS_ALIAS_REPLY_TO = 'replyTo'
# The apps:property makeDefault of the send-as alias property
SENDAS_ALIAS_MAKE_DEFAULT = 'makeDefault'
# The apps:property enable of the webclip property
WEBCLIP_ENABLE = 'enable'
# The apps:property enable of the forwarding property
FORWARDING_ENABLE = 'enable'
# The apps:property forwardTo of the forwarding property
FORWARDING_TO = 'forwardTo'
# The apps:property action of the forwarding property
FORWARDING_ACTION = 'action'
# The apps:property enable of the POP property
POP_ENABLE = 'enable'
# The apps:property enableFor of the POP property
POP_ENABLE_FOR = 'enableFor'
# The apps:property action of the POP property
POP_ACTION = 'action'
# The apps:property enable of the IMAP property
IMAP_ENABLE = 'enable'
# The apps:property enable of the vacation responder property
VACATION_RESPONDER_ENABLE = 'enable'
# The apps:property subject of the vacation responder property
VACATION_RESPONDER_SUBJECT = 'subject'
# The apps:property message of the vacation responder property
VACATION_RESPONDER_MESSAGE = 'message'
# The apps:property contactsOnly of the vacation responder property
VACATION_RESPONDER_CONTACTS_ONLY = 'contactsOnly'
# The apps:property signature of the signature property
SIGNATURE_VALUE = 'signature'
# The apps:property language of the language property
LANGUAGE_TAG = 'language'
# The apps:property pageSize of the general settings property
GENERAL_PAGE_SIZE = 'pageSize'
# The apps:property shortcuts of the general settings property
GENERAL_SHORTCUTS = 'shortcuts'
# The apps:property arrows of the general settings property
GENERAL_ARROWS = 'arrows'
# The apps:property snippets of the general settings property
GENERAL_SNIPPETS = 'snippets'
# The apps:property unicode of the general settings property
GENERAL_UNICODE = 'unicode'
class EmailSettingsEntry(gdata.data.GDEntry):
"""Represents an Email Settings entry in object form."""
property = [gdata.apps_property.AppsProperty]
def _GetProperty(self, name):
"""Get the apps:property value with the given name.
Args:
name: string Name of the apps:property value to get.
Returns:
The apps:property value with the given name, or None if the name was
invalid.
"""
value = None
for p in self.property:
if p.name == name:
value = p.value
break
return value
def _SetProperty(self, name, value):
"""Set the apps:property value with the given name to the given value.
Args:
name: string Name of the apps:property value to set.
value: string Value to give the apps:property value with the given name.
"""
found = False
for i in range(len(self.property)):
if self.property[i].name == name:
self.property[i].value = value
found = True
break
if not found:
self.property.append(gdata.apps_property.AppsProperty(name=name, value=value))
def find_edit_link(self):
return self.uri
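# Editor's note (not from the original gdata source): a brief, hedged sketch of
# how the apps:property plumbing above is used by the subclasses defined below.
# Each pyproperty (e.g. EmailSettingsLabel.name) simply delegates to
# _GetProperty/_SetProperty, so the value ends up stored as an
# <apps:property name='label' value='...'/> element on the entry:
# >>> label_entry = EmailSettingsLabel(name='Receipts')  # value is made up
# >>> label_entry.name                  # reads the 'label' apps:property
# 'Receipts'
# >>> label_entry.name = 'Archive-me'   # rewrites the same apps:property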
class EmailSettingsLabel(EmailSettingsEntry):
"""Represents a Label in object form."""
def GetName(self):
"""Get the name of the Label object.
Returns:
The name of this Label object as a string or None.
"""
return self._GetProperty(LABEL_NAME)
def SetName(self, value):
"""Set the name of this Label object.
Args:
value: string The new label name to give this object.
"""
self._SetProperty(LABEL_NAME, value)
name = pyproperty(GetName, SetName)
def __init__(self, uri=None, name=None, *args, **kwargs):
"""Constructs a new EmailSettingsLabel object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
name: string (optional) The name to give this new object.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsLabel, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if name:
self.name = name
class EmailSettingsFilter(EmailSettingsEntry):
"""Represents an Email Settings Filter in object form."""
def GetFrom(self):
"""Get the From value of the Filter object.
Returns:
The From value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_FROM_NAME)
def SetFrom(self, value):
"""Set the From value of this Filter object.
Args:
value: string The new From value to give this object.
"""
self._SetProperty(FILTER_FROM_NAME, value)
from_address = pyproperty(GetFrom, SetFrom)
def GetTo(self):
"""Get the To value of the Filter object.
Returns:
The To value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_TO_NAME)
def SetTo(self, value):
"""Set the To value of this Filter object.
Args:
value: string The new To value to give this object.
"""
self._SetProperty(FILTER_TO_NAME, value)
to_address = pyproperty(GetTo, SetTo)
def GetSubject(self):
"""Get the Subject value of the Filter object.
Returns:
The Subject value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_SUBJECT_NAME)
def SetSubject(self, value):
"""Set the Subject value of this Filter object.
Args:
value: string The new Subject value to give this object.
"""
self._SetProperty(FILTER_SUBJECT_NAME, value)
subject = pyproperty(GetSubject, SetSubject)
def GetHasTheWord(self):
"""Get the HasTheWord value of the Filter object.
Returns:
The HasTheWord value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_HAS_THE_WORD_NAME)
def SetHasTheWord(self, value):
"""Set the HasTheWord value of this Filter object.
Args:
value: string The new HasTheWord value to give this object.
"""
self._SetProperty(FILTER_HAS_THE_WORD_NAME, value)
has_the_word = pyproperty(GetHasTheWord, SetHasTheWord)
def GetDoesNotHaveTheWord(self):
"""Get the DoesNotHaveTheWord value of the Filter object.
Returns:
The DoesNotHaveTheWord value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_DOES_NOT_HAVE_THE_WORD_NAME)
def SetDoesNotHaveTheWord(self, value):
"""Set the DoesNotHaveTheWord value of this Filter object.
Args:
value: string The new DoesNotHaveTheWord value to give this object.
"""
self._SetProperty(FILTER_DOES_NOT_HAVE_THE_WORD_NAME, value)
does_not_have_the_word = pyproperty(GetDoesNotHaveTheWord,
SetDoesNotHaveTheWord)
def GetHasAttachments(self):
"""Get the HasAttachments value of the Filter object.
Returns:
The HasAttachments value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_HAS_ATTACHMENTS_NAME)
def SetHasAttachments(self, value):
"""Set the HasAttachments value of this Filter object.
Args:
value: string The new HasAttachments value to give this object.
"""
self._SetProperty(FILTER_HAS_ATTACHMENTS_NAME, value)
has_attachments = pyproperty(GetHasAttachments,
SetHasAttachments)
def GetLabel(self):
"""Get the Label value of the Filter object.
Returns:
The Label value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_LABEL)
def SetLabel(self, value):
"""Set the Label value of this Filter object.
Args:
value: string The new Label value to give this object.
"""
self._SetProperty(FILTER_LABEL, value)
label = pyproperty(GetLabel, SetLabel)
def GetMarkAsRead(self):
"""Get the MarkAsRead value of the Filter object.
Returns:
The MarkAsRead value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_MARK_AS_READ)
def SetMarkAsRead(self, value):
"""Set the MarkAsRead value of this Filter object.
Args:
value: string The new MarkAsRead value to give this object.
"""
self._SetProperty(FILTER_MARK_AS_READ, value)
mark_as_read = pyproperty(GetMarkAsRead, SetMarkAsRead)
def GetArchive(self):
"""Get the Archive value of the Filter object.
Returns:
The Archive value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_ARCHIVE)
def SetArchive(self, value):
"""Set the Archive value of this Filter object.
Args:
value: string The new Archive value to give this object.
"""
self._SetProperty(FILTER_ARCHIVE, value)
archive = pyproperty(GetArchive, SetArchive)
def __init__(self, uri=None, from_address=None, to_address=None,
subject=None, has_the_word=None, does_not_have_the_word=None,
has_attachments=None, label=None, mark_as_read=None,
archive=None, *args, **kwargs):
"""Constructs a new EmailSettingsFilter object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
from_address: string (optional) The source email address for the filter.
to_address: string (optional) The destination email address for
the filter.
subject: string (optional) The value the email must have in its
subject to be filtered.
has_the_word: string (optional) The value the email must have in its
subject or body to be filtered.
does_not_have_the_word: string (optional) The value the email cannot
have in its subject or body to be filtered.
has_attachments: Boolean (optional) Whether or not the email must
have an attachment to be filtered.
label: string (optional) The name of the label to apply to
messages matching the filter criteria.
mark_as_read: Boolean (optional) Whether or not to mark messages
matching the filter criteria as read.
archive: Boolean (optional) Whether or not to move messages
matching to Archived state.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsFilter, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if from_address:
self.from_address = from_address
if to_address:
self.to_address = to_address
if subject:
self.subject = subject
if has_the_word:
self.has_the_word = has_the_word
if does_not_have_the_word:
self.does_not_have_the_word = does_not_have_the_word
if has_attachments is not None:
self.has_attachments = str(has_attachments)
if label:
self.label = label
if mark_as_read is not None:
self.mark_as_read = str(mark_as_read)
if archive is not None:
self.archive = str(archive)
class EmailSettingsSendAsAlias(EmailSettingsEntry):
"""Represents an Email Settings send-as Alias in object form."""
def GetName(self):
"""Get the Name of the send-as Alias object.
Returns:
The Name of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_NAME)
def SetName(self, value):
"""Set the Name of this send-as Alias object.
Args:
value: string The new Name to give this object.
"""
self._SetProperty(SENDAS_ALIAS_NAME, value)
name = pyproperty(GetName, SetName)
def GetAddress(self):
"""Get the Address of the send-as Alias object.
Returns:
The Address of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_ADDRESS)
def SetAddress(self, value):
"""Set the Address of this send-as Alias object.
Args:
value: string The new Address to give this object.
"""
self._SetProperty(SENDAS_ALIAS_ADDRESS, value)
address = pyproperty(GetAddress, SetAddress)
def GetReplyTo(self):
"""Get the ReplyTo address of the send-as Alias object.
Returns:
The ReplyTo address of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_REPLY_TO)
def SetReplyTo(self, value):
"""Set the ReplyTo address of this send-as Alias object.
Args:
value: string The new ReplyTo address to give this object.
"""
self._SetProperty(SENDAS_ALIAS_REPLY_TO, value)
reply_to = pyproperty(GetReplyTo, SetReplyTo)
def GetMakeDefault(self):
"""Get the MakeDefault value of the send-as Alias object.
Returns:
The MakeDefault value of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_MAKE_DEFAULT)
def SetMakeDefault(self, value):
"""Set the MakeDefault value of this send-as Alias object.
Args:
value: string The new MakeDefault value to give this object.
"""
self._SetProperty(SENDAS_ALIAS_MAKE_DEFAULT, value)
make_default = pyproperty(GetMakeDefault, SetMakeDefault)
def __init__(self, uri=None, name=None, address=None, reply_to=None,
make_default=None, *args, **kwargs):
"""Constructs a new EmailSettingsSendAsAlias object with the given
arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
name: string (optional) The name that will appear in the "From" field
for this user.
address: string (optional) The email address that appears as the
origination address for emails sent by this user.
reply_to: string (optional) The address to be used as the reply-to
address in email sent using the alias.
make_default: Boolean (optional) Whether or not this alias should
become the default alias for this user.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsSendAsAlias, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if name:
self.name = name
if address:
self.address = address
if reply_to:
self.reply_to = reply_to
if make_default is not None:
self.make_default = str(make_default)
class EmailSettingsWebClip(EmailSettingsEntry):
"""Represents a WebClip in object form."""
def GetEnable(self):
"""Get the Enable value of the WebClip object.
Returns:
The Enable value of this WebClip object as a string or None.
"""
return self._GetProperty(WEBCLIP_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this WebClip object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(WEBCLIP_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def __init__(self, uri=None, enable=None, *args, **kwargs):
"""Constructs a new EmailSettingsWebClip object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable showing Web clips.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsWebClip, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
class EmailSettingsForwarding(EmailSettingsEntry):
"""Represents Forwarding settings in object form."""
def GetEnable(self):
"""Get the Enable value of the Forwarding object.
Returns:
The Enable value of this Forwarding object as a string or None.
"""
return self._GetProperty(FORWARDING_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this Forwarding object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(FORWARDING_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def GetForwardTo(self):
"""Get the ForwardTo value of the Forwarding object.
Returns:
The ForwardTo value of this Forwarding object as a string or None.
"""
return self._GetProperty(FORWARDING_TO)
def SetForwardTo(self, value):
"""Set the ForwardTo value of this Forwarding object.
Args:
value: string The new ForwardTo value to give this object.
"""
self._SetProperty(FORWARDING_TO, value)
forward_to = pyproperty(GetForwardTo, SetForwardTo)
def GetAction(self):
"""Get the Action value of the Forwarding object.
Returns:
The Action value of this Forwarding object as a string or None.
"""
return self._GetProperty(FORWARDING_ACTION)
def SetAction(self, value):
"""Set the Action value of this Forwarding object.
Args:
value: string The new Action value to give this object.
"""
self._SetProperty(FORWARDING_ACTION, value)
action = pyproperty(GetAction, SetAction)
def __init__(self, uri=None, enable=None, forward_to=None, action=None,
*args, **kwargs):
"""Constructs a new EmailSettingsForwarding object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable incoming email forwarding.
forward_to: string (optional) The address email will be forwarded to.
action: string (optional) The action to perform after forwarding an
email ("KEEP", "ARCHIVE", "DELETE").
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsForwarding, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
if forward_to:
self.forward_to = forward_to
if action:
self.action = action
class EmailSettingsPop(EmailSettingsEntry):
"""Represents POP settings in object form."""
def GetEnable(self):
"""Get the Enable value of the POP object.
Returns:
The Enable value of this POP object as a string or None.
"""
return self._GetProperty(POP_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this POP object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(POP_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def GetEnableFor(self):
"""Get the EnableFor value of the POP object.
Returns:
The EnableFor value of this POP object as a string or None.
"""
return self._GetProperty(POP_ENABLE_FOR)
def SetEnableFor(self, value):
"""Set the EnableFor value of this POP object.
Args:
value: string The new EnableFor value to give this object.
"""
self._SetProperty(POP_ENABLE_FOR, value)
enable_for = pyproperty(GetEnableFor, SetEnableFor)
def GetPopAction(self):
"""Get the Action value of the POP object.
Returns:
The Action value of this POP object as a string or None.
"""
return self._GetProperty(POP_ACTION)
def SetPopAction(self, value):
"""Set the Action value of this POP object.
Args:
value: string The new Action value to give this object.
"""
self._SetProperty(POP_ACTION, value)
action = pyproperty(GetPopAction, SetPopAction)
def __init__(self, uri=None, enable=None, enable_for=None,
action=None, *args, **kwargs):
"""Constructs a new EmailSettingsPOP object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable incoming POP3 access.
enable_for: string (optional) Whether to enable POP3 for all mail
("ALL_MAIL"), or mail from now on ("MAIL_FROM_NOW_ON").
action: string (optional) What Google Mail should do with its copy
of the email after it is retrieved using POP
("KEEP", "ARCHIVE", or "DELETE").
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsPop, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
if enable_for:
self.enable_for = enable_for
if action:
self.action = action
class EmailSettingsImap(EmailSettingsEntry):
"""Represents IMAP settings in object form."""
def GetEnable(self):
"""Get the Enable value of the IMAP object.
Returns:
The Enable value of this IMAP object as a string or None.
"""
return self._GetProperty(IMAP_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this IMAP object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(IMAP_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def __init__(self, uri=None, enable=None, *args, **kwargs):
"""Constructs a new EmailSettingsImap object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable IMAP access.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsImap, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
class EmailSettingsVacationResponder(EmailSettingsEntry):
"""Represents Vacation Responder settings in object form."""
def GetEnable(self):
"""Get the Enable value of the Vacation Responder object.
Returns:
The Enable value of this Vacation Responder object as a string or None.
"""
return self._GetProperty(VACATION_RESPONDER_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this Vacation Responder object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def GetSubject(self):
"""Get the Subject value of the Vacation Responder object.
Returns:
The Subject value of this Vacation Responder object as a string or None.
"""
return self._GetProperty(VACATION_RESPONDER_SUBJECT)
def SetSubject(self, value):
"""Set the Subject value of this Vacation Responder object.
Args:
value: string The new Subject value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_SUBJECT, value)
subject = pyproperty(GetSubject, SetSubject)
def GetMessage(self):
"""Get the Message value of the Vacation Responder object.
Returns:
The Message value of this Vacation Responder object as a string or None.
"""
return self._GetProperty(VACATION_RESPONDER_MESSAGE)
def SetMessage(self, value):
"""Set the Message value of this Vacation Responder object.
Args:
value: string The new Message value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_MESSAGE, value)
message = pyproperty(GetMessage, SetMessage)
def GetContactsOnly(self):
"""Get the ContactsOnly value of the Vacation Responder object.
Returns:
The ContactsOnly value of this Vacation Responder object as a
string or None.
"""
return self._GetProperty(VACATION_RESPONDER_ENABLE)
def SetContactsOnly(self, value):
"""Set the ContactsOnly value of this Vacation Responder object.
Args:
value: string The new ContactsOnly value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_CONTACTS_ONLY, value)
contacts_only = pyproperty(GetContactsOnly, SetContactsOnly)
def __init__(self, uri=None, enable=None, subject=None,
message=None, contacts_only=None, *args, **kwargs):
"""Constructs a new EmailSettingsVacationResponder object with the
given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable the vacation responder.
subject: string (optional) The subject line of the vacation responder
autoresponse.
message: string (optional) The message body of the vacation responder
autoresponse.
contacts_only: Boolean (optional) Whether to only send autoresponses
to known contacts.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsVacationResponder, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
if subject:
self.subject = subject
if message:
self.message = message
if contacts_only is not None:
self.contacts_only = str(contacts_only)
class EmailSettingsSignature(EmailSettingsEntry):
"""Represents a Signature in object form."""
def GetValue(self):
"""Get the value of the Signature object.
Returns:
The value of this Signature object as a string or None.
"""
value = self._GetProperty(SIGNATURE_VALUE)
if value == ' ': # hack to support empty signature
return ''
else:
return value
def SetValue(self, value):
"""Set the name of this Signature object.
Args:
value: string The new signature value to give this object.
"""
if value == '': # hack to support empty signature
value = ' '
self._SetProperty(SIGNATURE_VALUE, value)
signature_value = pyproperty(GetValue, SetValue)
def __init__(self, uri=None, signature=None, *args, **kwargs):
"""Constructs a new EmailSettingsSignature object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
signature: string (optional) The signature to be appended to outgoing
messages.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsSignature, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if signature is not None:
self.signature_value = signature
class EmailSettingsLanguage(EmailSettingsEntry):
"""Represents Language Settings in object form."""
def GetLanguage(self):
"""Get the tag of the Language object.
Returns:
The tag of this Language object as a string or None.
"""
return self._GetProperty(LANGUAGE_TAG)
def SetLanguage(self, value):
"""Set the tag of this Language object.
Args:
value: string The new tag value to give this object.
"""
self._SetProperty(LANGUAGE_TAG, value)
language_tag = pyproperty(GetLanguage, SetLanguage)
def __init__(self, uri=None, language=None, *args, **kwargs):
"""Constructs a new EmailSettingsLanguage object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
language: string (optional) The language tag for Google Mail's display
language.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsLanguage, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if language:
self.language_tag = language
class EmailSettingsGeneral(EmailSettingsEntry):
"""Represents General Settings in object form."""
def GetPageSize(self):
"""Get the Page Size value of the General Settings object.
Returns:
The Page Size value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_PAGE_SIZE)
def SetPageSize(self, value):
"""Set the Page Size value of this General Settings object.
Args:
value: string The new Page Size value to give this object.
"""
self._SetProperty(GENERAL_PAGE_SIZE, value)
page_size = pyproperty(GetPageSize, SetPageSize)
def GetShortcuts(self):
"""Get the Shortcuts value of the General Settings object.
Returns:
The Shortcuts value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_SHORTCUTS)
def SetShortcuts(self, value):
"""Set the Shortcuts value of this General Settings object.
Args:
value: string The new Shortcuts value to give this object.
"""
self._SetProperty(GENERAL_SHORTCUTS, value)
shortcuts = pyproperty(GetShortcuts, SetShortcuts)
def GetArrows(self):
"""Get the Arrows value of the General Settings object.
Returns:
The Arrows value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_ARROWS)
def SetArrows(self, value):
"""Set the Arrows value of this General Settings object.
Args:
value: string The new Arrows value to give this object.
"""
self._SetProperty(GENERAL_ARROWS, value)
arrows = pyproperty(GetArrows, SetArrows)
def GetSnippets(self):
"""Get the Snippets value of the General Settings object.
Returns:
The Snippets value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_SNIPPETS)
def SetSnippets(self, value):
"""Set the Snippets value of this General Settings object.
Args:
value: string The new Snippets value to give this object.
"""
self._SetProperty(GENERAL_SNIPPETS, value)
snippets = pyproperty(GetSnippets, SetSnippets)
def GetUnicode(self):
"""Get the Unicode value of the General Settings object.
Returns:
The Unicode value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_UNICODE)
def SetUnicode(self, value):
"""Set the Unicode value of this General Settings object.
Args:
value: string The new Unicode value to give this object.
"""
self._SetProperty(GENERAL_UNICODE, value)
use_unicode = pyproperty(GetUnicode, SetUnicode)
def __init__(self, uri=None, page_size=None, shortcuts=None,
arrows=None, snippets=None, use_unicode=None, *args, **kwargs):
"""Constructs a new EmailSettingsGeneral object with the given arguments.
Args:
      uri: string (optional) The uri of this object for HTTP requests.
page_size: int (optional) The number of conversations to be shown per page.
shortcuts: Boolean (optional) Whether to enable keyboard shortcuts.
arrows: Boolean (optional) Whether to display arrow-shaped personal
indicators next to email sent specifically to the user.
snippets: Boolean (optional) Whether to display snippets of the messages
in the inbox and when searching.
use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding
for all outgoing messages.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsGeneral, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if page_size is not None:
self.page_size = str(page_size)
if shortcuts is not None:
self.shortcuts = str(shortcuts)
if arrows is not None:
self.arrows = str(arrows)
if snippets is not None:
self.snippets = str(snippets)
if use_unicode is not None:
self.use_unicode = str(use_unicode)
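# Illustrative sketch, not part of the original module: one way these entry classes
# are typically constructed. It assumes EmailSettingsEntry/GDEntry can be instantiated
# without additional positional arguments; the chosen preference values are placeholders.
def _example_general_settings_entry():
  """Builds a hypothetical general settings entry with common preferences."""
  return EmailSettingsGeneral(page_size=50, shortcuts=True, arrows=False,
                              snippets=True, use_unicode=False)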
| 30.072502 | 84 | 0.703193 |
f753cc0757c9c46acd73bd5eb278b702035d8dd5 | 5,842 | py | Python | test/test_sliprules.py | lynnmunday/neml | 2c0e3db9f849345dba01d64fc8488e2b97e477dd | [
"MIT"
] | null | null | null | test/test_sliprules.py | lynnmunday/neml | 2c0e3db9f849345dba01d64fc8488e2b97e477dd | [
"MIT"
] | null | null | null | test/test_sliprules.py | lynnmunday/neml | 2c0e3db9f849345dba01d64fc8488e2b97e477dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from neml import history, interpolate
from neml.math import tensors, rotations
from neml.cp import crystallography, slipharden, sliprules
from common import differentiate
from nicediff import *
import unittest
import numpy as np
import numpy.linalg as la
class CommonSlipRule(object):
def test_d_slip_d_stress(self):
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
d = self.model.d_slip_d_s(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed)
nd = diff_scalar_symmetric(lambda s: self.model.slip(g, i, s, self.Q, self.H,
self.L, self.T, self.fixed), self.S)
self.assertEqual(d, nd)
def test_d_slip_d_hist(self):
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
d = np.array(self.model.d_slip_d_h(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed))
nd = np.array(diff_history_scalar(lambda h: self.model.slip(g, i, self.S, self.Q, h,
self.L, self.T, self.fixed), self.H))
self.assertTrue(np.allclose(nd.reshape(d.shape), d))
def test_d_hist_rate_d_stress(self):
d = np.array(self.model.d_hist_rate_d_stress(self.S, self.Q, self.H, self.L, self.T, self.fixed))
nd = diff_history_symmetric(lambda s: self.model.hist_rate(s, self.Q, self.H, self.L,
self.T, self.fixed), self.S)
self.assertTrue(np.allclose(nd.reshape(d.shape), d))
def test_d_hist_rate_d_hist(self):
d = np.array(self.model.d_hist_rate_d_hist(self.S, self.Q, self.H, self.L, self.T, self.fixed))
nd = diff_history_history(lambda h: self.model.hist_rate(self.S, self.Q, h, self.L,
self.T, self.fixed), self.H)
self.assertTrue(np.allclose(nd.reshape(d.shape), d))
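# Illustrative sketch, not part of the original tests: the mixins above compare analytic
# derivatives against numerical ones. The real helpers (differentiate, diff_scalar_symmetric,
# diff_history_*) handle tensor and History arguments; this scalar central-difference
# version only shows the underlying idea.
def _example_central_difference(f, x, eps=1.0e-6):
  """Approximates df/dx at x with a symmetric (central) difference."""
  return (f(x + eps) - f(x - eps)) / (2.0 * eps)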
class CommonSlipStrengthSlipRule(object):
def test_init_hist(self):
H1 = history.History()
self.model.populate_history(H1)
self.model.init_history(H1)
H2 = history.History()
self.strengthmodel.populate_history(H2)
self.strengthmodel.init_history(H2)
self.assertTrue(np.allclose(np.array(H1),
np.array(H2)))
def test_slip(self):
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
rs = self.L.shear(g, i, self.Q, self.S)
strength = self.strength + self.static
self.assertTrue(np.isclose(self.model.slip(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed),
self.model.sslip(g, i, rs, strength, self.T)))
def test_d_hist_rate(self):
self.assertTrue(np.allclose(
np.array(self.model.hist_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)),
np.array(self.strengthmodel.hist(self.S, self.Q, self.H, self.L, self.T, self.model, self.fixed))))
def test_d_sslip_d_tau(self):
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
nd = differentiate(lambda t: self.model.sslip(g, i, t, self.strength, self.T),
self.tau)
d = self.model.d_sslip_dtau(g, i, self.tau, self.strength, self.T)
self.assertTrue(np.isclose(nd,d))
def test_d_sslip_d_strength(self):
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
nd = differentiate(lambda s: self.model.sslip(g, i, self.tau, s, self.T), self.strength)
d = self.model.d_sslip_dstrength(g, i, self.tau, self.strength, self.T)
print(nd)
print(d)
self.assertTrue(np.isclose(nd, d))
class TestPowerLawSlip(unittest.TestCase, CommonSlipStrengthSlipRule, CommonSlipRule):
def setUp(self):
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.strength = 35.0
self.H = history.History()
self.H.add_scalar("strength")
self.H.set_scalar("strength", self.strength)
self.T = 300.0
self.tau0 = 10.0
self.tau_sat = 50.0
self.b = 2.5
self.strengthmodel = slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0)
self.static = self.tau0
self.g0 = 1.0
self.n = 3.0
self.model = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)
self.tau = 33.0
self.fixed = history.History()
def test_scalar_rate(self):
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
self.assertTrue(np.isclose(self.model.sslip(g, i, self.tau, self.strength, self.T),
self.g0 * np.abs(self.tau/self.strength)**(self.n-1.0) * self.tau/self.strength))
class TestBiVoceSlip(unittest.TestCase, CommonSlipStrengthSlipRule, CommonSlipRule):
def setUp(self):
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.strength_1 = 35.0
self.strength_2 = 25.0
self.strength = self.strength_1 + self.strength_2
self.H = history.History()
self.H.add_scalar("strength0")
self.H.set_scalar("strength0", self.strength_1)
self.H.add_scalar("strength1")
self.H.set_scalar("strength1", self.strength_2)
self.T = 300.0
self.tau0 = 10.0
self.tau_sat = 50.0
self.b = 2.5
self.strengthmodel = slipharden.SumSlipSingleStrengthHardening(
[slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0),
slipharden.VoceSlipHardening(self.tau_sat/2, self.b/2, self.tau0/2)])
self.static = self.tau0 + self.tau0 / 2
self.g0 = 1.0
self.n = 3.0
self.model = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)
self.tau = 33.0
self.fixed = history.History()
| 35.621951 | 109 | 0.657823 |
f753d4a3851fb7cad5ea563490d94024d0061b05 | 80,496 | py | Python | Kernel.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | Kernel.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | Kernel.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | import time
from threading import Thread, Lock
from LaserOperation import *
from svgelements import Path, SVGText
STATE_UNKNOWN = -1
STATE_INITIALIZE = 0
STATE_IDLE = 1
STATE_ACTIVE = 2
STATE_BUSY = 3
STATE_PAUSE = 4
STATE_END = 5
STATE_TERMINATE = 10
INTERPRETER_STATE_RAPID = 0
INTERPRETER_STATE_FINISH = 1
INTERPRETER_STATE_PROGRAM = 2
class Module:
"""
    Modules are generic lifecycle objects. When attached, they are initialized for that device. When that device
    is shut down, the shutdown() event is called. This permits the orderly registering and unregistering of
    other kernel objects, and the attachment of the module to a device.
Registered Modules are notified of the activation and deactivation of their device.
Modules can also be scheduled in the kernel to run at a particular time and a given number of times.
"""
def __init__(self, name=None, device=None, process=None, args=(), interval=1.0, times=None):
self.name = name
self.device = device
self.interval = interval
self.last_run = None
self.next_run = time.time() + self.interval
self.process = process
self.args = args
self.times = times
self.paused = False
self.executing = False
@property
def scheduled(self):
return self.next_run is not None and time.time() >= self.next_run
def cancel(self):
self.times = -1
def schedule(self):
if self not in self.device.jobs:
self.device.jobs.append(self)
def unschedule(self):
if self in self.device.jobs:
self.device.jobs.remove(self)
def attach(self, device, name=None):
self.device = device
self.name = name
self.initialize()
def detach(self, device, channel=None):
try:
if self.name is not None and self.name in device.instances['module']:
del device.instances['module'][self.name]
except KeyError:
pass
self.shutdown(channel)
self.device = None
def initialize(self):
pass
def shutdown(self, channel):
pass
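class _ExampleHeartbeatModule(Module):
    """Illustrative sketch, not part of the original kernel: a minimal scheduled module.
    It assumes an attached device whose scheduler thread is running; the signal name
    'example;heartbeat' is a placeholder."""
    def __init__(self):
        Module.__init__(self, process=self.tick, interval=5.0, times=3)
    def attach(self, device, name=None):
        Module.attach(self, device, name)
        self.schedule()  # run self.process every `interval` seconds, `times` times
    def tick(self):
        self.device.signal('example;heartbeat', time.time())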
class Spooler(Module):
"""
    The spooler module stores spoolable lasercode events as a synchronous queue.
Spooler registers itself as the device.spooler object and provides a standard location to send data to an unknown device.
* Peek()
* Pop()
* Send_Job()
* Clear_Queue()
"""
def __init__(self):
Module.__init__(self)
self.queue_lock = Lock()
self._queue = []
def __repr__(self):
return "Spooler()"
def attach(self, device, name=None):
Module.attach(self, device, name)
self.device.spooler = self
self.initialize()
def peek(self):
if len(self._queue) == 0:
return None
return self._queue[0]
def pop(self):
if len(self._queue) == 0:
return None
self.queue_lock.acquire(True)
queue_head = self._queue[0]
del self._queue[0]
self.queue_lock.release()
self.device.signal('spooler;queue', len(self._queue))
return queue_head
def job(self, *job):
"""
Send a single job event with parameters as needed.
        The job can be a single command with parameters (COMMAND_MOVE, 20, 20), a command without parameters
        (COMMAND_HOME), or a generator which can yield many lasercode commands.
:param job: job to send to the spooler.
:return:
"""
self.queue_lock.acquire(True)
if len(job) == 1:
self._queue.extend(job)
else:
self._queue.append(job)
self.queue_lock.release()
self.device.signal('spooler;queue', len(self._queue))
def jobs(self, jobs):
"""
Send several jobs generators to be appended to the end of the queue.
The jobs parameter must be suitable to be .extended to the end of the queue list.
:param jobs: jobs to extend
:return:
"""
self.queue_lock.acquire(True)
if isinstance(jobs, (list, tuple)):
self._queue.extend(jobs)
else:
self._queue.append(jobs)
self.queue_lock.release()
self.device.signal('spooler;queue', len(self._queue))
def job_if_idle(self, element):
if len(self._queue) == 0:
self.job(element)
return True
else:
return False
def clear_queue(self):
self.queue_lock.acquire(True)
self._queue = []
self.queue_lock.release()
self.device.signal('spooler;queue', len(self._queue))
def remove(self, element):
self.queue_lock.acquire(True)
self._queue.remove(element)
self.queue_lock.release()
self.device.signal('spooler;queue', len(self._queue))
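def _example_spooler_usage(device):
    """Illustrative sketch, not part of the original kernel: queueing work through an
    attached Spooler. It assumes `device` is booted with a Spooler instance attached."""
    spooler = device.spooler
    spooler.job(COMMAND_HOME)          # a single command without parameters
    spooler.job(COMMAND_MOVE, 50, 50)  # a single command with parameters
    if not spooler.job_if_idle(COMMAND_UNLOCK):
        # The queue was not idle, so the unlock command was rejected rather than queued.
        pass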
class Interpreter(Module):
"""
    An Interpreter Module takes spoolable commands and turns those commands into states and code in a
    language-agnostic fashion. This is intended to be overridden by a subclass or class with the required methods.
    Interpreters register themselves as device.interpreter objects.
    Interpreters expect the device.spooler object to exist to provide spooled commands as needed.
    These modules function to interpret hardware-specific backend information from the reusable spoolers and server
    objects that may also be common within devices.
"""
def __init__(self, pipe=None):
Module.__init__(self)
self.process_item = None
self.spooled_item = None
self.process = self.process_spool
self.interval = 0.01
self.pipe = pipe
self.extra_hold = None
self.state = INTERPRETER_STATE_RAPID
self.pulse_total = 0.0
self.pulse_modulation = True
self.properties = 0
self.is_relative = False
self.laser = False
self.laser_enabled = True
self.raster_step = 0
self.overscan = 20
self.speed = 30
self.power = 1000.0
self.d_ratio = None # None means to use speedcode default.
self.acceleration = None # None means to use speedcode default
def attach(self, device, name=None):
Module.attach(self, device, name)
self.device.interpreter = self
self.device.setting(int, 'current_x', 0)
self.device.setting(int, 'current_y', 0)
self.initialize()
self.schedule()
def process_spool(self, *args):
"""
Get next spooled element if needed.
Calls execute.
:param args:
:return:
"""
if self.spooled_item is None:
self.fetch_next_item()
if self.spooled_item is not None:
self.execute()
def execute(self):
"""
Default process to run entire command as a single call.
"""
if self.hold():
return
if self.spooled_item is None:
return
if isinstance(self.spooled_item, tuple):
self.command(self.spooled_item[0], *self.spooled_item[1:])
self.spooled_item = None
return
try:
e = next(self.spooled_item)
if isinstance(e, int):
self.command(e)
else:
self.command(e[0], *e[1:])
except StopIteration:
self.spooled_item = None
def fetch_next_item(self):
element = self.device.spooler.peek()
if element is None:
return # Spooler is empty.
self.device.spooler.pop()
if isinstance(element, int):
self.spooled_item = (element,)
elif isinstance(element, tuple):
self.spooled_item = element
else:
try:
self.spooled_item = element.generate()
except AttributeError:
self.spooled_item = element()
def command(self, command, *values):
"""Commands are middle language LaserCommandConstants there values are given."""
try:
if command == COMMAND_LASER_OFF:
self.laser_off()
elif command == COMMAND_LASER_ON:
self.laser_on()
elif command == COMMAND_LASER_DISABLE:
self.laser_disable()
elif command == COMMAND_LASER_ENABLE:
self.laser_enable()
elif command == COMMAND_CUT:
x, y = values
self.cut(x, y)
elif command == COMMAND_MOVE:
x, y = values
self.move(x, y)
elif command == COMMAND_HOME:
self.home()
elif command == COMMAND_LOCK:
self.lock_rail()
elif command == COMMAND_UNLOCK:
self.unlock_rail()
elif command == COMMAND_PLOT:
self.plot_path(values[0])
elif command == COMMAND_RASTER:
self.plot_raster(values[0])
elif command == COMMAND_SET_SPEED:
self.set_speed(values[0])
elif command == COMMAND_SET_POWER:
self.set_power(values[0])
elif command == COMMAND_SET_PPI:
self.set_ppi(values[0])
elif command == COMMAND_SET_PWM:
self.set_pwm(values[0])
elif command == COMMAND_SET_STEP:
self.set_step(values[0])
elif command == COMMAND_SET_OVERSCAN:
self.set_overscan(values[0])
elif command == COMMAND_SET_ACCELERATION:
self.set_acceleration(values[0])
elif command == COMMAND_SET_D_RATIO:
self.set_d_ratio(values[0])
elif command == COMMAND_SET_DIRECTION:
self.set_directions(values[0], values[1], values[2], values[3])
elif command == COMMAND_SET_INCREMENTAL:
self.set_incremental()
elif command == COMMAND_SET_ABSOLUTE:
self.set_absolute()
elif command == COMMAND_SET_POSITION:
self.set_position(values[0], values[1])
elif command == COMMAND_MODE_RAPID:
self.ensure_rapid_mode()
elif command == COMMAND_MODE_PROGRAM:
self.ensure_program_mode()
elif command == COMMAND_MODE_FINISHED:
self.ensure_finished_mode()
elif command == COMMAND_WAIT:
self.wait(values[0])
elif command == COMMAND_WAIT_FINISH:
self.wait_finish()
elif command == COMMAND_BEEP:
print('\a') # Beep.
elif command == COMMAND_FUNCTION:
if len(values) >= 1:
t = values[0]
if callable(t):
t()
elif command == COMMAND_SIGNAL:
if isinstance(values, str):
self.device.signal(values, None)
elif len(values) >= 2:
self.device.signal(values[0], *values[1:])
except AttributeError:
pass
def realtime_command(self, command, *values):
"""Asks for the execution of a realtime command. Unlike the spooled commands these
return False if rejected and something else if able to be performed. These will not
be queued. If rejected. They must be performed in realtime or cancelled.
"""
try:
if command == REALTIME_PAUSE:
self.pause()
elif command == REALTIME_RESUME:
self.resume()
elif command == REALTIME_RESET:
self.reset()
elif command == REALTIME_STATUS:
self.status()
elif command == REALTIME_SAFETY_DOOR:
self.safety_door()
elif command == REALTIME_JOG_CANCEL:
self.jog_cancel(*values)
elif command == REALTIME_SPEED_PERCENT:
self.realtime_speed_percent(*values)
elif command == REALTIME_SPEED:
self.realtime_speed(*values)
elif command == REALTIME_RAPID_PERCENT:
self.realtime_rapid_percent(*values)
elif command == REALTIME_RAPID:
self.realtime_rapid(*values)
elif command == REALTIME_POWER_PERCENT:
self.realtime_power_percent(*values)
elif command == REALTIME_POWER:
self.realtime_power(*values)
elif command == REALTIME_OVERSCAN:
self.realtime_overscan(*values)
elif command == REALTIME_LASER_DISABLE:
self.realtime_laser_disable(*values)
elif command == REALTIME_LASER_ENABLE:
self.realtime_laser_enable(*values)
elif command == REALTIME_FLOOD_COOLANT:
self.realtime_flood_coolant(*values)
elif command == REALTIME_MIST_COOLANT:
self.realtime_mist_coolant(*values)
except AttributeError:
pass # Method doesn't exist.
def hold(self):
if self.extra_hold is not None:
if self.extra_hold():
return True
else:
self.extra_hold = None
return False
def laser_off(self, *values):
self.laser = False
def laser_on(self, *values):
self.laser = True
def laser_disable(self, *values):
self.laser_enabled = False
def laser_enable(self, *values):
self.laser_enabled = True
def move(self, x, y):
self.device.current_x = x
self.device.current_y = y
def cut(self, x, y):
self.device.current_x = x
self.device.current_y = y
def home(self, *values):
self.device.current_x = 0
self.device.current_y = 0
def ensure_rapid_mode(self, *values):
if self.state == INTERPRETER_STATE_RAPID:
return
self.state = INTERPRETER_STATE_RAPID
self.device.signal('interpreter;mode', self.state)
def ensure_finished_mode(self, *values):
if self.state == INTERPRETER_STATE_FINISH:
return
self.state = INTERPRETER_STATE_FINISH
self.device.signal('interpreter;mode', self.state)
def ensure_program_mode(self, *values):
if self.state == INTERPRETER_STATE_PROGRAM:
return
self.state = INTERPRETER_STATE_PROGRAM
self.device.signal('interpreter;mode', self.state)
def set_speed(self, speed=None):
self.speed = speed
def set_power(self, power=1000.0):
self.power = power
def set_ppi(self, power=1000.0):
self.power = power
def set_pwm(self, power=1000.0):
self.power = power
def set_d_ratio(self, d_ratio=None):
self.d_ratio = d_ratio
def set_acceleration(self, accel=None):
self.acceleration = accel
def set_step(self, step=None):
self.raster_step = step
def set_overscan(self, overscan=None):
self.overscan = overscan
def set_incremental(self, *values):
self.is_relative = True
def set_absolute(self, *values):
self.is_relative = False
def set_position(self, x, y):
self.device.current_x = x
self.device.current_y = y
def wait(self, t):
self.next_run = t
def wait_finish(self, *values):
"""Adds an additional holding requirement if the pipe has any data."""
self.extra_hold = lambda: len(self.pipe) != 0
def reset(self):
self.spooled_item = None
self.device.spooler.clear_queue()
self.spooled_item = None
self.extra_hold = None
def status(self):
parts = list()
parts.append("x=%f" % self.device.current_x)
parts.append("y=%f" % self.device.current_y)
parts.append("speed=%f" % self.speed)
parts.append("power=%d" % self.power)
status = ";".join(parts)
self.device.signal('interpreter;status', status)
def set_prop(self, mask):
self.properties |= mask
def unset_prop(self, mask):
self.properties &= ~mask
def is_prop(self, mask):
return bool(self.properties & mask)
def toggle_prop(self, mask):
if self.is_prop(mask):
self.unset_prop(mask)
else:
self.set_prop(mask)
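def _example_interpreter_spooling(device):
    """Illustrative sketch, not part of the original kernel: feeding middle-language
    commands to the spooler, which the attached Interpreter consumes through its
    process_spool job. Assumes a booted device with spooler and interpreter attached."""
    device.spooler.jobs([
        (COMMAND_SET_SPEED, 35.0),
        (COMMAND_SET_POWER, 1000.0),
        COMMAND_MODE_PROGRAM,
        (COMMAND_MOVE, 100, 100),
        (COMMAND_CUT, 200, 100),
        COMMAND_MODE_RAPID,
    ])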
class Pipe:
"""
    Pipes are generic file-like objects with write commands and a realtime_write function.
    The realtime_write function should exist, but code using pipes should call it in a try block, excepting
    the AttributeError if it doesn't exist, so that pipes can be exchanged for real file-like objects.
    General buffer size information is provided through the len() builtin.
"""
def __len__(self):
return 0
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self):
pass
def close(self):
pass
def write(self, bytes_to_write):
pass
def realtime_write(self, bytes_to_write):
"""
This method shall be permitted to not exist.
To facilitate pipes being easily replaced with filelike objects, any calls
to this method should assume pipe may not have this command.
"""
self.write(bytes_to_write)
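class _ExampleBufferPipe(Pipe):
    """Illustrative sketch, not part of the original kernel: a minimal Pipe that buffers
    written bytes in memory rather than sending them to hardware."""
    def __init__(self):
        self._buffer = bytearray()
    def __len__(self):
        return len(self._buffer)
    def write(self, bytes_to_write):
        self._buffer += bytes_to_write
    def realtime_write(self, bytes_to_write):
        # Sketch: realtime data jumps ahead of anything already buffered.
        self._buffer[0:0] = bytes_to_write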
class Effect:
"""
Effects are intended to be external program modifications of the data.
None of these are implemented yet.
    The select is a selection string for choosing which elements are exported.
The save is the export file to use.
The path refers to the external program.
The command is the command to call the path with.
The load is the file expected to exist when the execution finishes.
"""
def __init__(self, select, save, path, command, load):
self.select = select
self.save = save
self.path = path
self.command = command
self.load = load
class Modification:
"""
    Modifications are intended to be lazily implemented changes to SVGElement objects and groups. The intent is to
provide a method for delayed modifications of data.
Modifications are functions called on single SVGElement objects.
    Type Input is the input type of element this is intended to act upon.
Type Output is the output type of the element this is intended to produce.
"""
def __init__(self, input_type, output_type):
self.input_type = input_type
self.output_type = output_type
class Signaler(Module):
"""
Signaler provides the signals functionality for a device. It replaces the functions for .signal(), .listen(),
.unlisten(), .last_signal().
"""
def __init__(self):
Module.__init__(self)
self.listeners = {}
self.adding_listeners = []
self.removing_listeners = []
self.last_message = {}
self.queue_lock = Lock()
self.message_queue = {}
self._is_queue_processing = False
self.process = self.delegate_messages
self.interval = 0.05
self.args = ()
def attach(self, device, name=None):
Module.attach(self, device, name)
self.device.signal = self.signal
self.device.listen = self.listen
self.device.unlisten = self.unlisten
self.device.last_signal = self.last_signal
self.schedule()
def shutdown(self, channel):
_ = self.device.device_root.translation
for key, listener in self.listeners.items():
if len(listener):
channel(_("WARNING: Listener '%s' still registered to %s.") % (key, str(listener)))
self.last_message = {}
self.listeners = {}
# Signal processing.
def signal(self, code, *message):
"""
Signals add the latest message to the message queue.
:param code: Signal code
:param message: Message to send.
"""
self.queue_lock.acquire(True)
self.message_queue[code] = message
self.queue_lock.release()
def delegate_messages(self):
"""
Delegate the process queue to the run_later thread.
run_later should be a threading instance wherein all signals are delivered.
"""
if self._is_queue_processing:
return
if self.device.run_later is not None:
self.device.run_later(self.process_queue, None)
else:
self.process_queue(None)
def process_queue(self, *args):
"""
        Performed in the run_later thread. Threadsafe.
        Processes the queued signals: inserts any attaching listeners, removes any detaching listeners, and
        provides newly attached listeners with the last message known for their signal.
:param args: None
:return:
"""
if len(self.message_queue) == 0 and len(self.adding_listeners) == 0 and len(self.removing_listeners) == 0:
return
self._is_queue_processing = True
add = None
remove = None
self.queue_lock.acquire(True)
queue = self.message_queue
if len(self.adding_listeners) != 0:
add = self.adding_listeners
self.adding_listeners = []
if len(self.removing_listeners):
remove = self.removing_listeners
self.removing_listeners = []
self.message_queue = {}
self.queue_lock.release()
if add is not None:
for signal, funct in add:
if signal in self.listeners:
listeners = self.listeners[signal]
listeners.append(funct)
else:
self.listeners[signal] = [funct]
if signal in self.last_message:
last_message = self.last_message[signal]
funct(*last_message)
if remove is not None:
for signal, funct in remove:
if signal in self.listeners:
listeners = self.listeners[signal]
try:
listeners.remove(funct)
except ValueError:
print("Value error removing: %s %s" % (str(listeners), signal))
for code, message in queue.items():
if code in self.listeners:
listeners = self.listeners[code]
for listener in listeners:
listener(*message)
self.last_message[code] = message
self._is_queue_processing = False
def last_signal(self, code):
"""
Queries the last signal for a particular code.
:param code: code to query.
:return: Last signal sent through the kernel for that code.
"""
try:
return self.last_message[code]
except KeyError:
return None
def listen(self, signal, funct):
self.queue_lock.acquire(True)
self.adding_listeners.append((signal, funct))
self.queue_lock.release()
def unlisten(self, signal, funct):
self.queue_lock.acquire(True)
self.removing_listeners.append((signal, funct))
self.queue_lock.release()
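def _example_signal_usage(device):
    """Illustrative sketch, not part of the original kernel: listening for and emitting a
    signal. Assumes a booted device with an attached Signaler (which binds
    device.signal/listen/unlisten), and reuses the 'spooler;queue' signal seen above."""
    def on_queue_change(length):
        print("spooler queue length:", length)
    device.listen('spooler;queue', on_queue_change)
    device.signal('spooler;queue', 0)  # queued; delivered on the next delegate pass
    device.unlisten('spooler;queue', on_queue_change)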
class Elemental(Module):
"""
    The elemental module governs all the interactions with the various elements,
    operations, and filenodes. It handles structure changes and selection, emphasis, and
    highlighting changes. The goal of this module is to make sure that the life cycle
    of the elements is strictly enforced. For example, every element that is removed
    must have had the .cache deleted, and anything selecting an element must propagate
    that information out to inform other interested modules.
"""
def __init__(self):
Module.__init__(self)
self._operations = list()
self._elements = list()
self._filenodes = {}
self._bounds = None
def attach(self, device, name=None):
Module.attach(self, device, name)
self.device.elements = self
self.device.classify = self.classify
self.device.save = self.save
self.device.save_types = self.save_types
self.device.load = self.load
self.device.load_types = self.load_types
def register(self, obj):
obj.cache = None
obj.icon = None
obj.bounds = None
obj.last_transform = None
obj.selected = False
obj.emphasized = False
obj.highlighted = False
def select():
obj.selected = True
self.device.signal('selected', obj)
def unselect():
obj.selected = False
self.device.signal('selected', obj)
def highlight():
obj.highlighted = True
self.device.signal('highlighted', obj)
def unhighlight():
obj.highlighted = False
self.device.signal('highlighted', obj)
def emphasize():
obj.emphasized = True
self.device.signal('emphasized', obj)
self.validate_bounds()
def unemphasize():
obj.emphasized = False
self.device.signal('emphasized', obj)
self.validate_bounds()
def modified():
"""
The matrix transformation was changed.
"""
obj.bounds = None
self._bounds = None
self.validate_bounds()
self.device.signal('modified', obj)
def altered():
"""
The data structure was changed.
"""
try:
obj.cache.UnGetNativePath(obj.cache.NativePath)
except AttributeError:
pass
del obj.cache
obj.cache = None
del obj.icon
obj.icon = None
obj.bounds = None
self._bounds = None
self.validate_bounds()
self.device.signal('altered', obj)
obj.select = select
obj.unselect = unselect
obj.highlight = highlight
obj.unhighlight = unhighlight
obj.emphasize = emphasize
obj.unemphasize = unemphasize
obj.modified = modified
obj.altered = altered
def unregister(self, e):
try:
            e.cache.UnGetNativePath(e.cache.NativePath)
except AttributeError:
pass
try:
del e.cache
except AttributeError:
pass
try:
del e.icon
except AttributeError:
pass
try:
e.unselect()
e.unemphasize()
e.unhighlight()
e.modified()
except AttributeError:
pass
def items(self, **kwargs):
def combined(*args):
for listv in args:
for itemv in listv:
yield itemv
for j in combined(self.ops(**kwargs), self.elems(**kwargs)):
yield j
def _filtered_list(self, item_list, **kwargs):
"""
Filters a list of items with selected, emphasized, and highlighted.
        False values mean find where that parameter is false.
        True values mean find where that parameter is true.
If the filter does not exist then it isn't used to filter that data.
Items which are set to None are skipped.
:param item_list:
:param kwargs:
:return:
"""
s = 'selected' in kwargs
if s:
s = kwargs['selected']
else:
s = None
e = 'emphasized' in kwargs
if e:
e = kwargs['emphasized']
else:
e = None
h = 'highlighted' in kwargs
if h:
h = kwargs['highlighted']
else:
h = None
for obj in item_list:
if obj is None:
continue
if s is not None and s != obj.selected:
continue
if e is not None and e != obj.emphasized:
continue
            if h is not None and h != obj.highlighted:
continue
yield obj
def ops(self, **kwargs):
for item in self._filtered_list(self._operations, **kwargs):
yield item
def elems(self, **kwargs):
for item in self._filtered_list(self._elements, **kwargs):
yield item
def first_element(self, **kwargs):
for e in self.elems(**kwargs):
return e
return None
def has_emphasis(self):
return self.first_element(emphasized=True) is not None
def count_elems(self, **kwargs):
return len(list(self.elems(**kwargs)))
def count_op(self, **kwargs):
return len(list(self.ops(**kwargs)))
def get_op(self, index, **kwargs):
for i, op in enumerate(self.ops(**kwargs)):
if i == index:
return op
raise IndexError
def get_elem(self, index, **kwargs):
for i, elem in enumerate(self.elems(**kwargs)):
if i == index:
return elem
raise IndexError
def add_op(self, op):
self._operations.append(op)
self.register(op)
self.device.signal('operation_added', op)
def add_ops(self, adding_ops):
self._operations.extend(adding_ops)
for op in adding_ops:
self.register(op)
self.device.signal('operation_added', adding_ops)
def add_elem(self, element):
self._elements.append(element)
self.register(element)
self.device.signal('element_added', element)
def add_elems(self, adding_elements):
self._elements.extend(adding_elements)
for element in adding_elements:
self.register(element)
self.device.signal('element_added', adding_elements)
def files(self):
return self._filenodes
def clear_operations(self):
for op in self._operations:
self.unregister(op)
self.device.signal('operation_removed', op)
self._operations.clear()
def clear_elements(self):
for e in self._elements:
self.unregister(e)
self.device.signal('element_removed', e)
self._elements.clear()
def clear_files(self):
self._filenodes.clear()
def clear_elements_and_operations(self):
self.clear_elements()
self.clear_operations()
def clear_all(self):
self.clear_elements()
self.clear_operations()
self.clear_files()
self.validate_bounds()
def remove_files(self, file_list):
for f in file_list:
del self._filenodes[f]
def remove_elements(self, elements_list):
for elem in elements_list:
for i, e in enumerate(self._elements):
if elem is e:
self.unregister(elem)
self.device.signal('element_removed', elem)
self._elements[i] = None
self.remove_elements_from_operations(elements_list)
def remove_operations(self, operations_list):
for op in operations_list:
for i, o in enumerate(self._operations):
if o is op:
self.unregister(op)
self.device.signal('operation_removed', op)
self._operations[i] = None
self.purge_unset()
def remove_elements_from_operations(self, elements_list):
for i, op in enumerate(self._operations):
if op is None:
continue
elems = [e for e in op if e not in elements_list]
op.clear()
op.extend(elems)
if len(op) == 0:
self._operations[i] = None
self.purge_unset()
def purge_unset(self):
if None in self._operations:
ops = [op for op in self._operations if op is not None]
self._operations.clear()
self._operations.extend(ops)
if None in self._elements:
elems = [elem for elem in self._elements if elem is not None]
self._elements.clear()
self._elements.extend(elems)
def bounds(self):
return self._bounds
def validate_bounds(self):
boundary_points = []
for e in self._elements:
if e.last_transform is None or e.last_transform != e.transform or e.bounds is None:
e.bounds = e.bbox(False)
e.last_transform = copy(e.transform)
if e.bounds is None:
continue
if not e.emphasized:
continue
box = e.bounds
top_left = e.transform.point_in_matrix_space([box[0], box[1]])
top_right = e.transform.point_in_matrix_space([box[2], box[1]])
bottom_left = e.transform.point_in_matrix_space([box[0], box[3]])
bottom_right = e.transform.point_in_matrix_space([box[2], box[3]])
boundary_points.append(top_left)
boundary_points.append(top_right)
boundary_points.append(bottom_left)
boundary_points.append(bottom_right)
if len(boundary_points) == 0:
new_bounds = None
else:
xmin = min([e[0] for e in boundary_points])
ymin = min([e[1] for e in boundary_points])
xmax = max([e[0] for e in boundary_points])
ymax = max([e[1] for e in boundary_points])
new_bounds = [xmin, ymin, xmax, ymax]
if self._bounds != new_bounds:
self._bounds = new_bounds
self.device.device_root.signal('selected_bounds', self._bounds)
def is_in_set(self, v, selected, flat=True):
for q in selected:
if flat and isinstance(q, (list, tuple)) and self.is_in_set(v, q, flat):
return True
if q is v:
return True
return False
def set_selected(self, selected):
"""
Sets selected and other properties of a given element.
All selected elements are also semi-selected.
        If an element itself is selected, all of its subelements are semi-selected.
If any operation is selected, all sub-operations are highlighted.
"""
if selected is None:
selected = []
for s in self._elements:
should_select = self.is_in_set(s, selected, False)
should_emphasize = self.is_in_set(s, selected)
if s.emphasized:
if not should_emphasize:
s.unemphasize()
else:
if should_emphasize:
s.emphasize()
if s.selected:
if not should_select:
s.unselect()
else:
if should_select:
s.select()
for s in self._operations:
should_select = self.is_in_set(s, selected, False)
should_emphasize = self.is_in_set(s, selected)
if s.emphasized:
if not should_emphasize:
s.unemphasize()
else:
if should_emphasize:
s.emphasize()
if s.selected:
if not should_select:
s.unselect()
else:
if should_select:
s.select()
def center(self):
bounds = self._bounds
return (bounds[2] + bounds[0]) / 2.0, (bounds[3] + bounds[1]) / 2.0
def ensure_positive_bounds(self):
b = self._bounds
self._bounds = [min(b[0], b[2]), min(b[1], b[3]), max(b[0], b[2]), max(b[1], b[3])]
self.device.device_root.signal('selected_bounds', self._bounds)
def update_bounds(self, b):
self._bounds = [b[0], b[1], b[0], b[1]]
self.device.device_root.signal('selected_bounds', self._bounds)
@staticmethod
def bounding_box(elements):
if isinstance(elements, SVGElement):
elements = [elements]
elif isinstance(elements, list):
try:
elements = [e.object for e in elements if isinstance(e.object, SVGElement)]
except AttributeError:
pass
boundary_points = []
for e in elements:
box = e.bbox(False)
if box is None:
continue
top_left = e.transform.point_in_matrix_space([box[0], box[1]])
top_right = e.transform.point_in_matrix_space([box[2], box[1]])
bottom_left = e.transform.point_in_matrix_space([box[0], box[3]])
bottom_right = e.transform.point_in_matrix_space([box[2], box[3]])
boundary_points.append(top_left)
boundary_points.append(top_right)
boundary_points.append(bottom_left)
boundary_points.append(bottom_right)
if len(boundary_points) == 0:
return None
xmin = min([e[0] for e in boundary_points])
ymin = min([e[1] for e in boundary_points])
xmax = max([e[0] for e in boundary_points])
ymax = max([e[1] for e in boundary_points])
return xmin, ymin, xmax, ymax
def move_selected(self, dx, dy):
for obj in self.elems(emphasized=True):
obj.transform.post_translate(dx, dy)
obj.modified()
def set_selected_by_position(self, position):
def contains(box, x, y=None):
if y is None:
y = x[1]
x = x[0]
return box[0] <= x <= box[2] and box[1] <= y <= box[3]
if self.has_emphasis():
if self._bounds is not None and contains(self._bounds, position):
return # Select by position aborted since selection position within current select bounds.
for e in reversed(list(self.elems())):
bounds = e.bbox()
if bounds is None:
continue
if contains(bounds, position):
self.set_selected([e])
return
self.set_selected(None)
def classify(self, elements):
"""
Classify does the initial placement of elements as operations.
RasterOperation is the default for images.
        Elements with red strokes are classed as cut operations and blue strokes as engrave operations;
        anything else, anything with a fill, and text elements are classed as raster operations.
"""
if elements is None:
return
raster = None
engrave = None
cut = None
rasters = []
engraves = []
cuts = []
self.device.setting(bool, 'cut_acceleration_custom', False)
self.device.setting(int, 'cut_acceleration', 4)
self.device.setting(bool, 'cut_dratio_custom', False)
self.device.setting(float, 'cut_dratio', None)
self.device.setting(float, 'cut_speed', 10.0)
self.device.setting(float, 'cut_power', 1000.0)
self.device.setting(bool, 'engrave_acceleration_custom', False)
self.device.setting(int, 'engrave_acceleration', 4)
self.device.setting(bool, 'engrave_dratio_custom', False)
self.device.setting(float, 'engrave_dratio', None)
self.device.setting(float, 'engrave_speed', 35.0)
self.device.setting(float, 'engrave_power', 1000.0)
self.device.setting(bool, 'raster_acceleration_custom', False)
self.device.setting(int, 'raster_acceleration', 4)
self.device.setting(float, 'raster_speed', 200.0)
self.device.setting(float, 'raster_power', 1000.0)
self.device.setting(int, 'raster_step', 2)
self.device.setting(int, 'raster_direction', 0)
self.device.setting(int, 'raster_overscan', 20)
if not isinstance(elements, list):
elements = [elements]
for element in elements:
if isinstance(element, (Path, SVGText)):
if element.stroke == "red" and not isinstance(element, SVGText):
if cut is None or not cut.has_same_properties(element.values):
cut = CutOperation(speed=self.device.cut_speed,
power=self.device.cut_power,
dratio_custom=self.device.cut_dratio_custom,
dratio=self.device.cut_dratio,
acceleration_custom=self.device.cut_acceleration_custom,
acceleration=self.device.cut_acceleration)
cuts.append(cut)
cut.set_properties(element.values)
cut.append(element)
elif element.stroke == "blue" and not isinstance(element, SVGText):
if engrave is None or not engrave.has_same_properties(element.values):
engrave = EngraveOperation(speed=self.device.engrave_speed,
power=self.device.engrave_power,
dratio_custom=self.device.engrave_dratio_custom,
dratio=self.device.engrave_dratio,
acceleration_custom=self.device.engrave_acceleration_custom,
acceleration=self.device.engrave_acceleration)
engraves.append(engrave)
engrave.set_properties(element.values)
engrave.append(element)
if (element.stroke != "red" and element.stroke != "blue") or \
(element.fill is not None and element.fill != "none") or \
isinstance(element, SVGText):
# not classed already, or was already classed but has a fill.
if raster is None or not raster.has_same_properties(element.values):
raster = RasterOperation(speed=self.device.raster_speed,
power=self.device.raster_power,
raster_step=self.device.raster_step,
raster_direction=self.device.raster_direction,
overscan=self.device.raster_overscan,
acceleration_custom=self.device.raster_acceleration_custom,
acceleration=self.device.raster_acceleration)
rasters.append(raster)
raster.set_properties(element.values)
raster.append(element)
elif isinstance(element, SVGImage):
try:
step = element.values['raster_step']
except KeyError:
step = self.device.raster_step
rasters.append(RasterOperation(element,
speed=self.device.raster_speed,
power=self.device.raster_power,
raster_step=step,
raster_direction=self.device.raster_direction,
overscan=self.device.raster_overscan,
acceleration_custom=self.device.raster_acceleration_custom,
acceleration=self.device.raster_acceleration))
rasters = [r for r in rasters if len(r) != 0]
engraves = [r for r in engraves if len(r) != 0]
cuts = [r for r in cuts if len(r) != 0]
        ops = []
        ops.extend(rasters)
        ops.extend(engraves)
        ops.extend(cuts)
        self.add_ops(rasters)
        self.add_ops(engraves)
        self.add_ops(cuts)
        return ops
def load(self, pathname, **kwargs):
for loader_name, loader in self.device.registered['load'].items():
for description, extensions, mimetype in loader.load_types():
if pathname.lower().endswith(extensions):
results = loader.load(self.device, pathname, **kwargs)
if results is None:
continue
elements, pathname, basename = results
self._filenodes[pathname] = elements
self.add_elems(elements)
return elements, pathname, basename
return None
def load_types(self, all=True):
filetypes = []
if all:
filetypes.append('All valid types')
exts = []
for loader_name, loader in self.device.registered['load'].items():
for description, extensions, mimetype in loader.load_types():
for ext in extensions:
exts.append('*.%s' % ext)
filetypes.append(';'.join(exts))
for loader_name, loader in self.device.registered['load'].items():
for description, extensions, mimetype in loader.load_types():
exts = []
for ext in extensions:
exts.append('*.%s' % ext)
filetypes.append("%s (%s)" % (description, extensions[0]))
filetypes.append(';'.join(exts))
return "|".join(filetypes)
def save(self, pathname):
for save_name, saver in self.device.registered['save'].items():
for description, extension, mimetype in saver.save_types():
if pathname.lower().endswith(extension):
saver.save(self.device, pathname, 'default')
return True
return False
def save_types(self):
filetypes = []
for saver_name, saver in self.device.registered['save'].items():
for description, extension, mimetype in saver.save_types():
filetypes.append("%s (%s)" % (description, extension))
filetypes.append("*.%s" % (extension))
return "|".join(filetypes)
class Device:
"""
A Device is a specific module cluster that serves a unified purpose.
The Kernel is a type of device which provides root functionality.
* Provides job scheduler
* Registers devices, modules, pipes, modifications, and effects.
* Stores instanced devices, modules, pipes, channels, controls and threads.
* Processes local channels.
    Channels are device objects with specific uids that send messages to watcher functions. These can be watched
    even if the channels are never opened or used. The channels can be opened and provide information without any
    consideration of what might be watching.
"""
def __init__(self, root=None, uid=0):
self.thread = None
self.name = None
self.device_root = root
self.device_name = "Device"
self.device_version = "0.0.0"
self.device_location = "Kernel"
self.uid = uid
self.state = STATE_UNKNOWN
self.jobs = []
self.registered = {}
self.instances = {}
# Channel processing.
self.channels = {}
self.watchers = {}
self.buffer = {}
self.greet = {}
self.element = None
def __str__(self):
if self.uid == 0:
return "Project"
else:
return "%s:%d" % (self.device_name, self.uid)
def __call__(self, code, *message):
self.signal(code, *message)
def __setitem__(self, key, value):
"""
Kernel value settings. If Config is registered this will be persistent.
:param key: Key to set.
:param value: Value to set
:return: None
"""
if isinstance(key, str):
self.write_persistent(key, value)
def __getitem__(self, item):
"""
        Kernel value get. If Config is registered this will be persistent.
As a shorthand any float, int, string, or bool set with this will also be found at kernel.item
:param item:
:return:
"""
if isinstance(item, tuple):
if len(item) == 2:
t, key = item
return self.read_persistent(t, key)
else:
t, key, default = item
return self.read_persistent(t, key, default)
return self.read_item_persistent(item)
def attach(self, device, name=None):
self.device_root = device
self.name = name
self.initialize(device)
def detach(self, device, channel=None):
if 'device' in self.device_root.instances:
devices = self.device_root.instances['device']
if self.uid in devices:
del devices[self.uid]
def initialize(self, device):
pass
def read_item_persistent(self, item):
return self.device_root.read_item_persistent(item)
def write_persistent(self, key, value, uid=0):
self.device_root.write_persistent(key, value, uid=uid)
def read_persistent(self, t, key, default=None, uid=0):
return self.device_root.read_persistent(t, key, default, uid=uid)
def threaded(self, func, thread_name=None):
if thread_name is None:
thread_name = func.__name__
thread = Thread(name=thread_name)
def run():
self.thread_instance_add(thread_name, thread)
try:
func()
except:
import sys
sys.excepthook(*sys.exc_info())
self.thread_instance_remove(thread_name)
thread.run = run
thread.start()
return thread
def boot(self):
"""
Kernel boot sequence. This should be called after all the registered devices are established.
:return:
"""
if self.thread is None or not self.thread.is_alive():
self.thread = self.threaded(self.run, 'Device%d' % int(self.uid))
self.control_instance_add("Debug Device", self._start_debugging)
def shutdown(self, channel=None):
"""
Begins device shutdown procedure.
"""
self.state = STATE_TERMINATE
_ = self.device_root.translation
channel(_("Shutting down."))
self.detach(self, channel=channel)
channel(_("Saving Device State: '%s'") % str(self))
self.flush()
if 'device' in self.instances:
# Join and shutdown any child devices.
devices = self.instances['device']
del self.instances['device']
for device_name in devices:
device = devices[device_name]
channel(_("Device Shutdown Started: '%s'") % str(device))
device_thread = device.thread
device.stop()
if device_thread is not None:
device_thread.join()
channel(_("Device Shutdown Finished: '%s'") % str(device))
for type_name in list(self.instances):
if type_name in ('control', 'thread'):
continue
for module_name in list(self.instances[type_name]):
obj = self.instances[type_name][module_name]
try:
obj.stop()
channel(_("Stopping %s %s: %s") % (module_name, type_name, str(obj)))
except AttributeError:
pass
if 'thread' in self.instances:
for thread_name in list(self.instances['thread']):
try:
thread = self.instances['thread'][thread_name]
except KeyError:
channel(_("Thread %s exited safely %s") % (thread_name, str(thread)))
continue
                if not thread.is_alive():
channel(_("WARNING: Dead thread %s still registered to %s.") % (thread_name, str(thread)))
continue
channel(_("Finishing Thread %s for %s") % (thread_name, str(thread)))
if thread is self.thread:
channel(_("%s is the current shutdown thread") % (thread_name))
continue
# Do not sleep thread waiting for that thread to die. This is that thread.
try:
channel(_("Asking thread to stop."))
thread.stop()
except AttributeError:
pass
channel(_("Waiting for thread %s: %s") % (thread_name, str(thread)))
thread.join()
channel(_("Thread %s finished. %s") % (thread_name, str(thread)))
else:
channel(_("No threads required halting."))
for type_name in list(self.instances):
            if type_name in ('control',):
continue
for module_name in list(self.instances[type_name]):
obj = self.instances[type_name][module_name]
try:
obj.detach(self, channel=channel)
channel(_("Shutting down %s %s: %s") % (module_name, type_name, str(obj)))
except AttributeError:
pass
for type_name in list(self.instances):
            if type_name in ('control',):
continue
for module_name in self.instances[type_name]:
obj = self.instances[type_name][module_name]
if obj is self.thread:
continue # Don't warn about failure to close current thread.
channel(_("WARNING: %s %s was not closed.") % (type_name, module_name))
channel(_("Shutdown."))
shutdown_root = False
if not self.is_root():
if 'device' in self.device_root.instances:
root_devices = self.device_root.instances['device']
if root_devices is None:
shutdown_root = True
else:
if str(self.uid) in root_devices:
del root_devices[str(self.uid)]
if len(root_devices) == 0:
shutdown_root = True
else:
shutdown_root = True
if shutdown_root:
channel(_("All Devices are shutdown. Stopping Kernel."))
self.device_root.stop()
def add_job(self, run, args=(), interval=1.0, times=None):
"""
Adds a job to the scheduler.
:param run: function to run
:param args: arguments to give to that function.
:param interval: in seconds, how often should the job be run.
:param times: limit on number of executions.
:return: Reference to the job added.
"""
job = Module(self, process=run, args=args, interval=interval, times=times)
self.jobs.append(job)
return job
def run(self):
"""
Scheduler main loop.
        Checks the Scheduler thread state, and whether it should abort or pause.
        Checks each job and, if that job is scheduled to run, executes it.
:return:
"""
self.state = STATE_ACTIVE
while self.state != STATE_END:
time.sleep(0.005) # 200 ticks a second.
if self.state == STATE_TERMINATE:
break
while self.state == STATE_PAUSE:
# The scheduler is paused.
time.sleep(1.0)
jobs = self.jobs
jobs_update = False
for job in jobs:
# Checking if jobs should run.
if job.scheduled:
job.next_run = 0 # Set to zero while running.
if job.times is not None:
job.times = job.times - 1
if job.times <= 0:
jobs_update = True
if job.times < 0:
continue
try:
if isinstance(jobs, int):
job.process(job.args[0])
elif isinstance(job.args, tuple):
job.process(*job.args)
else:
job.process(job.args)
except:
import sys
sys.excepthook(*sys.exc_info())
job.last_run = time.time()
job.next_run += job.last_run + job.interval
if jobs_update:
self.jobs = [job for job in jobs if job.times is None or job.times > 0]
self.state = STATE_END
# If we aborted the thread, we trigger Kernel Shutdown in this thread.
self.shutdown(self.device_root.channel_open('shutdown'))
def _start_debugging(self):
"""
Debug function hooks all functions within the device with a debug call that saves the data to the disk and
prints that information.
:return:
"""
import functools
import datetime
import types
filename = "MeerK40t-debug-{date:%Y-%m-%d_%H_%M_%S}.txt".format(date=datetime.datetime.now())
debug_file = open(filename, "a")
debug_file.write("\n\n\n")
def debug(func, obj):
@functools.wraps(func)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args]
kwargs_repr = ["%s=%s" % (k, v) for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
start = "Calling %s.%s(%s)" % (str(obj), func.__name__, signature)
debug_file.write(start + '\n')
print(start)
t = time.time()
value = func(*args, **kwargs)
t = time.time() - t
finish = " %s returned %s after %fms" % (func.__name__, value, t * 1000)
print(finish)
debug_file.write(finish + '\n')
debug_file.flush()
return value
return wrapper_debug
attach_list = [modules for modules, module_name in self.instances['module'].items()]
attach_list.append(self)
for obj in attach_list:
for attr in dir(obj):
if attr.startswith('_'):
continue
fn = getattr(obj, attr)
if not isinstance(fn, types.FunctionType) and \
not isinstance(fn, types.MethodType):
continue
setattr(obj, attr, debug(fn, obj))
def setting(self, setting_type, setting_name, default=None):
"""
Registers a setting to be used between modules.
If the setting exists, its value remains unchanged.
If the setting exists in the persistent storage that value is used.
If there is no settings value, the default will be used.
:param setting_type: int, float, str, or bool value
:param setting_name: name of the setting
:param default: default value for the setting to have.
:return: load_value
"""
if self.uid != 0:
setting_uid_name = '%s/%s' % (self.uid, setting_name)
else:
setting_uid_name = setting_name
if hasattr(self, setting_name) and getattr(self, setting_name) is not None:
return getattr(self, setting_name)
if not setting_name.startswith('_'):
load_value = self.read_persistent(setting_type, setting_uid_name, default)
else:
load_value = default
setattr(self, setting_name, load_value)
return load_value
def flush(self):
for attr in dir(self):
if attr.startswith('_'):
continue
if attr == 'uid':
continue
value = getattr(self, attr)
if value is None:
continue
if self.uid != 0:
uid_attr = '%d/%s' % (self.uid, attr)
else:
uid_attr = attr
if isinstance(value, (int, bool, str, float)):
self.write_persistent(uid_attr, value)
def update(self, setting_name, value):
if hasattr(self, setting_name):
old_value = getattr(self, setting_name)
else:
old_value = None
setattr(self, setting_name, value)
self(setting_name, (value, old_value))
def execute(self, control_name, *args):
self.instances['control'][control_name](*args)
def signal(self, code, *message):
if self.uid != 0:
code = '%d;%s' % (self.uid, code)
if self.device_root is not None and self.device_root is not self:
self.device_root.signal(code, *message)
def last_signal(self, signal):
if self.uid != 0:
signal = '%d;%s' % (self.uid, signal)
if self.device_root is not None and self.device_root is not self:
try:
return self.device_root.last_signal(signal)
except AttributeError:
pass
return None
def listen(self, signal, funct):
if self.uid != 0:
signal = '%d;%s' % (self.uid, signal)
if self.device_root is not None and self.device_root is not self:
self.device_root.listen(signal, funct)
def unlisten(self, signal, funct):
if self.uid != 0:
signal = '%d;%s' % (self.uid, signal)
if self.device_root is not None and self.device_root is not self:
self.device_root.unlisten(signal, funct)
def state(self):
return self.state
def resume(self):
self.state = STATE_ACTIVE
def pause(self):
self.state = STATE_PAUSE
def stop(self):
self.state = STATE_TERMINATE
# Channel processing
def add_greet(self, channel, greet):
self.greet[channel] = greet
if channel in self.channels:
self.channels[channel](greet)
def add_watcher(self, channel, monitor_function):
if channel not in self.watchers:
self.watchers[channel] = [monitor_function]
else:
for q in self.watchers[channel]:
if q is monitor_function:
return # This is already being watched by that.
self.watchers[channel].append(monitor_function)
if channel in self.greet:
monitor_function(self.greet[channel])
if channel in self.buffer:
for line in self.buffer[channel]:
monitor_function(line)
def remove_watcher(self, channel, monitor_function):
self.watchers[channel].remove(monitor_function)
def channel_open(self, channel, buffer=0):
if channel not in self.channels:
def chan(message):
if channel in self.watchers:
for w in self.watchers[channel]:
w(message)
if buffer <= 0:
return
try:
buff = self.buffer[channel]
except KeyError:
buff = list()
self.buffer[channel] = buff
buff.append(message)
if len(buff) + 10 > buffer:
self.buffer[channel] = buff[-buffer:]
self.channels[channel] = chan
if channel in self.greet:
chan(self.greet[channel])
return self.channels[channel]
# Kernel object registration
def register(self, object_type, name, obj):
if object_type not in self.registered:
self.registered[object_type] = {}
self.registered[object_type][name] = obj
try:
obj.sub_register(self)
except AttributeError:
pass
def register_module(self, name, obj):
self.register('module', name, obj)
def register_device(self, name, obj):
self.register('device', name, obj)
def register_pipe(self, name, obj):
self.register('pipe', name, obj)
def register_modification(self, name, obj):
self.register('modification', name, obj)
def register_effect(self, name, obj):
self.register('effect', name, obj)
# Device kernel object
def is_root(self):
return self.device_root is None or self.device_root is self
def device_instance_open(self, device_name, instance_name=None, **kwargs):
if instance_name is None:
instance_name = device_name
return self.open('device', device_name, self, instance_name=instance_name, **kwargs)
def device_instance_close(self, name):
self.close('device', name)
def device_instance_remove(self, name):
if name in self.instances['device']:
del self.instances['device'][name]
def using(self, type_name, object_name, *args, instance_name=None, **kwargs):
if instance_name is None:
instance_name = object_name
if type_name in self.instances:
if instance_name in self.instances[type_name]:
return self.instances[type_name][instance_name]
return self.open(type_name, object_name, *args, instance_name=instance_name, **kwargs)
def open(self, type_name, object_name, *args, instance_name=None, **kwargs):
if instance_name is None:
instance_name = object_name
if self.device_root is None or self.device_root is self:
module_object = self.registered[type_name][object_name]
else:
module_object = self.device_root.registered[type_name][object_name]
instance = module_object(*args, **kwargs)
instance.attach(self, name=instance_name)
self.add(type_name, instance_name, instance)
return instance
def close(self, type_name, name):
if type_name in self.instances and name in self.instances[type_name]:
obj = self.instances[type_name][name]
try:
obj.close()
except AttributeError:
pass
obj.detach(self)
if name in self.instances[type_name]:
del self.instances[type_name][name]
def add(self, type_name, name, instance):
if type_name not in self.instances:
self.instances[type_name] = {}
self.instances[type_name][name] = instance
def remove(self, type_name, name):
if name in self.instances[type_name]:
del self.instances[type_name][name]
def module_instance_open(self, module_name, *args, instance_name=None, **kwargs):
return self.open('module', module_name, *args, instance_name=instance_name, **kwargs)
def module_instance_close(self, name):
self.close('module', name)
def module_instance_remove(self, name):
self.remove('module', name)
# Pipe kernel object
def pipe_instance_open(self, pipe_name, instance_name=None, **kwargs):
self.open('pipe', pipe_name, instance_name=instance_name, **kwargs)
# Control kernel object. Registered function calls.
def control_instance_add(self, control_name, function):
self.add('control', control_name, function)
def control_instance_remove(self, control_name):
self.remove('control', control_name)
# Thread kernel object. Registered Threads.
def thread_instance_add(self, thread_name, obj):
self.add('thread', thread_name, obj)
def thread_instance_remove(self, thread_name):
self.remove('thread', thread_name)
def get_text_thread_state(self, state):
_ = self.device_root.translation
if state == STATE_INITIALIZE:
return _("Unstarted")
elif state == STATE_TERMINATE:
return _("Abort")
elif state == STATE_END:
return _("Finished")
elif state == STATE_PAUSE:
return _("Pause")
elif state == STATE_BUSY:
return _("Busy")
elif state == STATE_ACTIVE:
return _("Active")
elif state == STATE_IDLE:
return _("Idle")
elif state == STATE_UNKNOWN:
return _("Unknown")
def get_state(self, thread_name):
try:
return self.instances['thread'][thread_name].state()
        except (AttributeError, KeyError):
return STATE_UNKNOWN
def classify(self, elements):
if self.device_root is not None and self.device_root is not self:
return self.device_root.classify(elements)
def load(self, pathname, **kwargs):
if self.device_root is not None and self.device_root is not self:
return self.device_root.load(pathname, **kwargs)
def load_types(self, all=True):
if self.device_root is not None and self.device_root is not self:
return self.device_root.load_types(all)
def save(self, pathname):
if self.device_root is not None and self.device_root is not self:
return self.device_root.save(pathname)
def save_types(self):
if self.device_root is not None and self.device_root is not self:
return self.device_root.save_types()
class Kernel(Device):
"""
The Kernel is the device root object. It stores device independent settings, values, and functions.
* It is itself a type of device. It has no root, and should be the DeviceRoot.
* Shared location of loaded elements data
* Registered loaders and savers.
* The persistent storage object
* The translation function
* The run later function
* The keymap object
"""
def __init__(self, config=None):
Device.__init__(self, self, 0)
# Current Project.
self.device_name = "MeerK40t"
self.device_version = "0.6.0"
self.device_root = self
# Persistent storage if it exists.
self.config = None
if config is not None:
self.set_config(config)
# Translation function if exists.
self.translation = lambda e: e # Default for this code is do nothing.
# Keymap/alias values
self.keymap = {}
self.alias = {}
self.run_later = lambda listener, message: listener(message)
self.register_module('Signaler', Signaler)
self.register_module('Elemental', Elemental)
self.register_module('Spooler', Spooler)
def boot(self):
"""
Kernel boot sequence. This should be called after all the registered devices are established.
:return:
"""
Device.boot(self)
self.default_keymap()
self.default_alias()
self.device_boot()
def shutdown(self, channel=None):
"""
Begins kernel shutdown procedure.
"""
Device.shutdown(self, channel)
if self.config is not None:
self.config.Flush()
def default_keymap(self):
self.keymap["escape"] = "window open Adjustments"
self.keymap["d"] = "+right"
self.keymap["a"] = "+left"
self.keymap["w"] = "+up"
self.keymap["s"] = "+down"
self.keymap['numpad_down'] = '+translate_down'
self.keymap['numpad_up'] = '+translate_up'
self.keymap['numpad_left'] = '+translate_left'
self.keymap['numpad_right'] = '+translate_right'
self.keymap['numpad*'] = '+scale_up'
self.keymap['numpad/'] = '+scale_down'
self.keymap['numpad+'] = '+rotate_cw'
self.keymap['numpad-'] = '+rotate_ccw'
self.keymap['control+a'] = 'element *'
self.keymap['control+i'] = 'element ~'
self.keymap['control+f'] = 'control Fill'
self.keymap['control+s'] = 'control Stroke'
self.keymap['control+r'] = 'rect 0 0 1000 1000'
self.keymap['control+e'] = 'circle 500 500 500'
self.keymap['control+d'] = 'element copy'
self.keymap['control+shift+h'] = 'scale -1 1'
self.keymap['control+shift+v'] = 'scale 1 -1'
self.keymap['control+1'] = "bind 1 move $x $y"
self.keymap['control+2'] = "bind 2 move $x $y"
self.keymap['control+3'] = "bind 3 move $x $y"
self.keymap['control+4'] = "bind 4 move $x $y"
self.keymap['control+5'] = "bind 5 move $x $y"
self.keymap['alt+r'] = 'raster'
self.keymap['alt+e'] = 'engrave'
self.keymap['alt+c'] = 'cut'
self.keymap['delete'] = 'element delete'
self.keymap['f4'] = "window open CameraInterface"
self.keymap['f5'] = "refresh"
self.keymap['f6'] = "window open JobSpooler"
self.keymap['f7'] = "window open Controller"
self.keymap['f8'] = "control Path"
self.keymap['f9'] = "control Transform"
self.keymap['f12'] = "window open Terminal"
self.keymap['alt+f12'] = "terminal_ruida"
self.keymap['alt+f13'] = 'terminal_watch'
self.keymap['pause'] = "control Realtime Pause_Resume"
self.keymap['home'] = "home"
def default_alias(self):
self.alias['+scale_up'] = "loop scale 1.02"
self.alias['+scale_down'] = "loop scale 0.98"
self.alias['+rotate_cw'] = "loop rotate 2"
self.alias['+rotate_ccw'] = "loop rotate -2"
self.alias['+translate_right'] = "loop translate 1mm 0"
self.alias['+translate_left'] = "loop translate -1mm 0"
self.alias['+translate_down'] = "loop translate 0 1mm"
self.alias['+translate_up'] = "loop translate 0 -1mm"
self.alias['+right'] = "loop right 1mm"
self.alias['+left'] = "loop left 1mm"
self.alias['+up'] = "loop up 1mm"
self.alias['+down'] = "loop down 1mm"
self.alias['+upright'] = "loop move_relative 1mm -1mm"
self.alias['+downright'] = "loop move_relative 1mm 1mm"
self.alias['+upleft'] = "loop move_relative -1mm -1mm"
self.alias['+downleft'] = "loop move_relative -1mm 1mm"
self.alias['-scale_up'] = "end scale 1.02"
self.alias['-scale_down'] = "end scale 0.98"
self.alias['-rotate_cw'] = "end rotate 2"
self.alias['-rotate_ccw'] = "end rotate -2"
self.alias['-translate_right'] = "end translate 1mm 0"
self.alias['-translate_left'] = "end translate -1mm 0"
self.alias['-translate_down'] = "end translate 0 1mm"
self.alias['-translate_up'] = "end translate 0 -1mm"
self.alias['-right'] = "end right 1mm"
self.alias['-left'] = "end left 1mm"
self.alias['-up'] = "end up 1mm"
self.alias['-down'] = "end down 1mm"
self.alias['-upright'] = "end move_relative 1mm -1mm"
self.alias['-downright'] = "end move_relative 1mm 1mm"
self.alias['-upleft'] = "end move_relative -1mm -1mm"
self.alias['-downleft'] = "end move_relative -1mm 1mm"
self.alias['terminal_ruida'] = "window open Terminal;ruidaserver"
self.alias['terminal_watch'] = "window open Terminal;channel save usb;channel save send;channel save recv"
def read_item_persistent(self, item):
return self.config.Read(item)
def read_persistent(self, t, key, default=None, uid=0):
if self.config is None:
return default
if uid != 0:
key = '%s/%s' % (str(uid), key)
if default is not None:
if t == str:
return self.config.Read(key, default)
elif t == int:
return self.config.ReadInt(key, default)
elif t == float:
return self.config.ReadFloat(key, default)
elif t == bool:
return self.config.ReadBool(key, default)
if t == str:
return self.config.Read(key)
elif t == int:
return self.config.ReadInt(key)
elif t == float:
return self.config.ReadFloat(key)
elif t == bool:
return self.config.ReadBool(key)
def write_persistent(self, key, value, uid=0):
if self.config is None:
return
if uid != 0:
key = '%d/%s' % (uid, key)
if isinstance(value, str):
self.config.Write(key, value)
elif isinstance(value, int):
self.config.WriteInt(key, value)
elif isinstance(value, float):
self.config.WriteFloat(key, value)
elif isinstance(value, bool):
self.config.WriteBool(key, value)
def set_config(self, config):
self.config = config
for attr in dir(self):
if attr.startswith('_'):
continue
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, (int, bool, float, str)):
self.write_persistent(attr, value)
more, value, index = config.GetFirstEntry()
while more:
if not value.startswith('_'):
if not hasattr(self, value):
setattr(self, value, None)
more, value, index = config.GetNextEntry(index)
def device_boot(self):
"""
Boots any devices that are set to boot.
:return:
"""
self.setting(str, 'list_devices', '')
devices = self.list_devices
for device in devices.split(';'):
try:
d = int(device)
except ValueError:
return
device_name = self.read_persistent(str, 'device_name', 'Lhystudios', uid=d)
autoboot = self.read_persistent(bool, 'autoboot', True, uid=d)
if autoboot:
dev = self.device_instance_open(device_name, uid=d, instance_name=str(device))
dev.boot()
def device_add(self, device_type, device_uid):
self.write_persistent('device_name', device_type, uid=device_uid)
self.write_persistent('autoboot', True, uid=device_uid)
self.setting(str, 'list_devices', '')
devices = [d for d in self.list_devices.split(';') if d != '']
devices.append(str(device_uid))
self.list_devices = ';'.join(devices)
self.write_persistent('list_devices', self.list_devices)
def register_loader(self, name, obj):
self.registered['load'][name] = obj
def register_saver(self, name, obj):
self.registered['save'][name] = obj
| 37.146285 | 126 | 0.555667 |
f753dffc2fb865ec5ac4199a5e21df845c7e2b12 | 1,244 | py | Python | 00_LIBRARIES/00_NUMPY/03_numpy_manipulations.py | san99tiago/ML_BASICS | ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f | [
"MIT"
] | 2 | 2021-03-18T06:07:09.000Z | 2021-05-08T22:14:14.000Z | 00_LIBRARIES/00_NUMPY/03_numpy_manipulations.py | san99tiago/ML_BASICS | ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f | [
"MIT"
] | null | null | null | 00_LIBRARIES/00_NUMPY/03_numpy_manipulations.py | san99tiago/ML_BASICS | ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f | [
"MIT"
] | null | null | null | # NUMPY MANIPULATIONS OF ARRAYS
# Santiago Garcia Arango
# -------------------------------------------------------------------------
import numpy as np
my_array = np.arange(1, 11) # [1,2,..,8,9,10]
print("my_array=\n", my_array, "\n")
# -----------------CHECKING CONDITIONS IN ARRAY ITEMS----------------------
# FIRST WAY...
# This is how we show boolean result of a desired condition of an array
boolean_array = my_array > 5
print("my_array > 5 --> ", boolean_array, "\n")
# We can take advantage of the boolean_array by calling the main array...
# ..."evaluated" in the True statements of the boolean_array.
# This will give us only the original array where the conditions are True
print("my_array[boolean_array] = ", my_array[boolean_array], "\n")
# SECOND WAY...
# This previous two step process is usually done in one step!!!
# Remark: This is the most common way to do this!!!
print("my_array[my_array>5] = ", my_array[my_array > 5], "\n")
# -----------------------CREATE MATRICES EASIER----------------------------
# Example: create this matrix:
# 1 1 1 1 1
# 1 0 0 0 1
# 1 0 9 0 1
# 1 0 0 0 1
# 1 1 1 1 1
cool_matrix = np.ones((5, 5))
cool_matrix[1:4, 1:4] = 0
cool_matrix[2, 2] = 9
print("cool_matrix:\n", cool_matrix, "\n")
| 32.736842 | 75 | 0.596463 |
f754708b2250b03d251cc3778718e2d65bd43090 | 661 | py | Python | design_patterns/oop/alien_game/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 3 | 2017-05-02T10:28:13.000Z | 2019-02-06T09:10:11.000Z | design_patterns/oop/alien_game/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 2 | 2017-06-21T20:39:14.000Z | 2020-02-25T10:28:57.000Z | design_patterns/oop/alien_game/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 2 | 2016-07-29T04:35:22.000Z | 2017-01-18T17:05:36.000Z | from abc import ABCMeta, abstractmethod
class Scene(object):
__metaclass__ = ABCMeta
@abstractmethod
def enter(self):
"""
Enter method to every scene
:return:
"""
pass
class Engine(object):
__metaclass__ = ABCMeta
def __init__(self, scene_map):
self.scene_map = scene_map
@abstractmethod
def play(self):
pass
class Map(object):
__metaclass__ = ABCMeta
def __init__(self, start_scene):
self.start_scene = start_scene
@abstractmethod
def next_scene(self, scene_name):
pass
@abstractmethod
def opening_scene(self):
pass
| 16.525 | 39 | 0.620272 |
f7547ea80bb2de55ad47fa7ca92198ad1630f16d | 845 | py | Python | models/languageManager.py | falconsmilie/Raspberry-Pi-3-Weather | 132ed29d7d717e1597d1c5e8dcb5fcf716c81d17 | [
"MIT"
] | null | null | null | models/languageManager.py | falconsmilie/Raspberry-Pi-3-Weather | 132ed29d7d717e1597d1c5e8dcb5fcf716c81d17 | [
"MIT"
] | null | null | null | models/languageManager.py | falconsmilie/Raspberry-Pi-3-Weather | 132ed29d7d717e1597d1c5e8dcb5fcf716c81d17 | [
"MIT"
] | null | null | null | from os import path
from pathlib import Path as path_lib
import json
class LanguageManager(object):
def __init__(self):
self._path_to_language_file = 'config/languageConfig.json'
def get_languages(self):
languages = []
language_file_path = path.join(
path.dirname(path.realpath('__file__')),
self._path_to_language_file
)
if path_lib(language_file_path).is_file():
with open(language_file_path, 'r') as language_config:
language_json = json.load(language_config)
for lang_id, lang in language_json.items():
languages.append(''.join([lang, ' (', lang_id, ')']))
languages = sorted(languages)
else:
raise Exception('Cannot load Weather Languages.')
return languages
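# Assumed shape of config/languageConfig.json (an illustration, not taken from the repository):
#     {"en": "English", "de": "German"}  ->  get_languages() == ["English (en)", "German (de)"]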
| 24.142857 | 69 | 0.617751 |
f754c72ffb9265cfed27b504d38730f5d180db82 | 1,743 | py | Python | util.py | eth-sri/transformation-smoothing | 12a653e881a6d61c5c63a3e16d58292435486cbd | [
"Apache-2.0"
] | 3 | 2020-11-07T18:12:50.000Z | 2021-06-11T22:56:09.000Z | util.py | eth-sri/transformation-smoothing | 12a653e881a6d61c5c63a3e16d58292435486cbd | [
"Apache-2.0"
] | null | null | null | util.py | eth-sri/transformation-smoothing | 12a653e881a6d61c5c63a3e16d58292435486cbd | [
"Apache-2.0"
] | null | null | null | import PIL
import PIL.Image
from functional import compose
import numpy as np
import argparse
lmap = compose(list, map)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
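# Typical argparse wiring for str2bool (usage sketch, not part of the original module):
#     parser.add_argument('--flag', type=str2bool, nargs='?', const=True, default=False)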
def split(a, n):
"""
Splits a list into n parts of approx. equal length
from https://stackoverflow.com/questions/2130016/splitting-a-list-into-n-parts-of-approximately-equal-length
"""
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
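# Example (sketch): list(split([0, 1, 2, 3, 4], 2)) -> [[0, 1, 2], [3, 4]]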
def str2FloatOrNone(v):
if v.lower() == 'none':
return None
try:
return float(v)
except ValueError:
raise argparse.ArgumentTypeError('Float or none value expected.')
def torch_image_to_PIL(img):
img = img.cpu().numpy()
if len(img.shape) == 4:
img = img[0, ...]
elif len(img.shape) == 3:
pass
else:
assert False
img = 255 * np.transpose(img, (1, 2, 0))
img = np.clip(np.round(img), 0, 255).astype(np.uint8)
return PIL.Image.fromarray(img)
class Logger(object):
def __init__(self, filename, stdout):
self.terminal = stdout
if filename is not None:
self.log = open(filename, "a")
else:
self.log = None
def write(self, message):
self.terminal.write(message)
if self.log is not None:
self.log.write(message)
def flush(self):
self.terminal.flush()
if self.log is not None:
self.log.flush()
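# Typical use of Logger (sketch): tee stdout into a log file while still printing to the console.
#     sys.stdout = Logger("run.log", sys.stdout)   # 'sys' would need to be imported by the caller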
def get_interpolation(i):
return getattr(PIL.Image, i.upper())
| 24.208333 | 112 | 0.588067 |
f754fbd282c7e4cd1caf9ef441e8ccd2e128b008 | 1,476 | py | Python | backend/appengine/routes/chamados/rest.py | cbeloni/pychronesapp | fff744dfdbc1932968a950fda614e8ec84d162a3 | [
"MIT"
] | null | null | null | backend/appengine/routes/chamados/rest.py | cbeloni/pychronesapp | fff744dfdbc1932968a950fda614e8ec84d162a3 | [
"MIT"
] | null | null | null | backend/appengine/routes/chamados/rest.py | cbeloni/pychronesapp | fff744dfdbc1932968a950fda614e8ec84d162a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from tekton.gae.middleware.json_middleware import JsonResponse
from chamado_app import chamado_facade # @UnresolvedImport
@login_not_required
@no_csrf
def index():
cmd = chamado_facade.list_chamados_cmd()
chamado_list = cmd()
chamado_form=chamado_facade.chamado_form()
chamado_dcts = [chamado_form.fill_with_model(m) for m in chamado_list]
return JsonResponse(chamado_dcts)
@login_not_required
@no_csrf
def new(_resp, **chamado_properties):
cmd = chamado_facade.save_chamado_cmd(**chamado_properties)
return _save_or_update_json_response(cmd, _resp)
@login_not_required
@no_csrf
def edit(_resp, chamado_id, **chamado_properties):
cmd = chamado_facade.update_chamado_cmd(chamado_id, **chamado_properties)
return _save_or_update_json_response(cmd, _resp)
@login_not_required
@no_csrf
def delete(chamado_id):
chamado_facade.delete_chamado_cmd(chamado_id)()
@login_not_required
@no_csrf
def _save_or_update_json_response(cmd, _resp):
try:
chamado = cmd()
except CommandExecutionException:
_resp.status_code = 400
return JsonResponse({'errors': cmd.errors})
chamado_form=chamado_facade.chamado_form()
return JsonResponse(chamado_form.fill_with_model(chamado))
| 32.086957 | 77 | 0.79336 |
f75518064427d54bf16e59aa446c4d99ede28d8e | 1,131 | py | Python | Chapter_01/blog/sitemaps.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | 1 | 2021-04-23T16:35:45.000Z | 2021-04-23T16:35:45.000Z | Chapter_01/blog/sitemaps.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | null | null | null | Chapter_01/blog/sitemaps.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | null | null | null | from django.contrib.sitemaps import Sitemap
from .models import Post
class PostSiteMap(Sitemap):
"""
Just a reminder:
        We're (simply) overriding
the (static) attrs of the class 'Sitemap' :P
Some attrs & methods:
changefreq possible vals: 'always', 'daily', 'yearly' etc.
priority quotes from 'sitemaps.org'
Search engines may use this information
when selecting between URLs on the same site,
so you can use this to increase the likelihood
                            that your most important pages are present in a search index.
items what to include (in the sitemap)
lastmod the last-mod-time of the post (get from 'items()')
                        the 'updated' is the field from our model 'Post' :P
"""
changefreq = 'weekly'
priority = 0.9
def items(self):
return Post.published.all()
def lastmod(self, obj):
return obj.updated | 31.416667 | 84 | 0.521662 |
f7552098abd265f505cb89767c6bafe1d11daf95 | 153,949 | py | Python | controller/project_controller/projects/WaferFaultDetection_new/best_model_finder/tuner.py | rohandhanraj/Auto-AI-Pipeline | d5f39715c802db45afae0d5978d228bf0bcd2f0a | [
"MIT"
] | null | null | null | controller/project_controller/projects/WaferFaultDetection_new/best_model_finder/tuner.py | rohandhanraj/Auto-AI-Pipeline | d5f39715c802db45afae0d5978d228bf0bcd2f0a | [
"MIT"
] | null | null | null | controller/project_controller/projects/WaferFaultDetection_new/best_model_finder/tuner.py | rohandhanraj/Auto-AI-Pipeline | d5f39715c802db45afae0d5978d228bf0bcd2f0a | [
"MIT"
] | null | null | null | import uuid
import numpy
import pandas
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBClassifier, XGBRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import roc_auc_score, accuracy_score, r2_score, roc_curve
import sys
from exception_layer.generic_exception.generic_exception import GenericException as RandomForestClassificationException
from exception_layer.generic_exception.generic_exception import GenericException as XGBoostClassificationException
from exception_layer.generic_exception.generic_exception import GenericException as ModelFinderException
from plotly_dash.accuracy_graph.accuracy_graph import AccurayGraph
from sklearn.naive_bayes import GaussianNB
from project_library_layer.initializer.initializer import Initializer
from sklearn.linear_model import Ridge, Lasso, RidgeCV, LassoCV, ElasticNet, ElasticNetCV
class ModelFinder:
"""
This class shall be used to find the model with best accuracy and AUC score.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self, project_id, file_object, logger_object):
try:
self.project_id = project_id
self.file_object = file_object
self.logger_object = logger_object
self.clf = RandomForestClassifier()
self.knn = KNeighborsClassifier()
self.xgb = XGBClassifier(objective='binary:logistic')
self.sv_classifier = SVC()
self.gnb = GaussianNB()
self.linearReg = LinearRegression()
self.RandomForestReg = RandomForestRegressor()
self.DecisionTreeReg = DecisionTreeRegressor()
self.sv_regressor = SVR()
self.sgd_regression = SGDRegressor()
self.initailizer = Initializer()
self.model_name = []
self.model = []
self.score = []
except Exception as e:
model_finder = ModelFinderException(
"Failed during object instantiation in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.__init__.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_ridge_regression(self, train_x, train_y):
try:
self.logger_object.log("Entered the get best params for Ridge Repressor")
alphas = numpy.random.uniform(low=0, high=10, size=(50,))
ridge_cv = RidgeCV(alphas=alphas, cv=5, normalize=True)
ridge_cv.fit(train_x, train_y)
alpha = ridge_cv.alpha_
ridge_model = Ridge(alpha=alpha)
ridge_model.fit(train_x, train_y)
self.logger_object.log(
'Ridge Regressor best params <alpha value: ' + str(ridge_cv.alpha_) + '>. Exited the '
'get_best_params_for_ridge_regression method of the Model_Finder class')
return ridge_model
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_ridge_regression.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_support_vector_regressor(self, train_x, train_y):
try:
self.logger_object.log("Entered the get best params for Support Vector Repressor")
param_grid = {'C': [0.1, 1, 10, 50, 100, 500], 'gamma': [1, 0.5, 0.1, 0.01, 0.001]}
grid = GridSearchCV(SVR(), param_grid, verbose=3, cv=5)
grid.fit(train_x, train_y)
C = grid.best_params_['C']
gamma = grid.best_params_['gamma']
svr_reg = SVR(C=C, gamma=gamma)
svr_reg.fit(train_x, train_y)
self.logger_object.log('Support Vector Regressor best params: ' + str(grid.best_params_) + '. Exited the '
'get_best_params_for_support_vector_regressor method of the Model_Finder class')
return svr_reg
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_support_vector_regressor.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_random_forest(self, train_x, train_y):
"""
Method Name: get_best_params_for_random_forest
Description: get the parameters for Random Forest Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"n_estimators": [10, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(2, 4, 1), "max_features": ['auto', 'log2']}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=self.clf, param_grid=param_grid, cv=5, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
max_features = grid.best_params_['max_features']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
self.clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,
max_depth=max_depth, max_features=max_features)
# training the mew model
self.clf.fit(train_x, train_y)
self.logger_object.log('Random Forest best params: ' + str(grid.best_params_) + '. Exited the '
'get_best_params_for_random_forest method of the Model_Finder class')
return self.clf
except Exception as e:
random_clf_exception = RandomForestClassificationException(
"Random Forest Parameter tuning failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_random_forest.__name__))
raise Exception(random_clf_exception.error_message_detail(str(e), sys)) from e
def get_best_params_for_xgboost(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_xgboost = {
'learning_rate': [0.5, 0.001],
'max_depth': [20],
'n_estimators': [10, 200]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3, cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
learning_rate = grid.best_params_['learning_rate']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
self.xgb = XGBClassifier(learning_rate=learning_rate, max_depth=max_depth, n_estimators=n_estimators)
# training the mew model
self.xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(grid.best_params_) + '. Exited the '
'get_best_params_for_xgboost method of the Model_Finder class')
return self.xgb
except Exception as e:
xg_boost_clf_exception = XGBoostClassificationException(
"XGBoost Parameter tuning failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_xgboost.__name__))
raise Exception(xg_boost_clf_exception.error_message_detail(str(e), sys)) from e
def get_best_model(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
if cluster_no is not None:
title_generator = " Cluster " + cluster_no + " model {}"
else:
title_generator = "Model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
xgboost = self.get_best_params_for_xgboost(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
""" 5. SVC """
if len(test_y.unique()) != 1:
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
# comparing the two models
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_random_forest_thyroid(self, train_x, train_y):
"""
Method Name: get_best_params_for_random_forest
Description: get the parameters for Random Forest Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')
try:
# initializing with different combination of parameters
param_grid = {"n_estimators": [10, 50, 100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(2, 4, 1), "max_features": ['auto', 'log2']}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, cv=5, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
max_features = grid.best_params_['max_features']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,
max_depth=max_depth, max_features=max_features)
# training the mew model
clf.fit(train_x, train_y)
self.logger_object.log('Random Forest best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_random_forest method of the Model_Finder class')
return clf
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_random_forest_thyroid.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_KNN_fraud_detection(self, train_x, train_y):
"""
Method Name: get_best_params_for_KNN
Description: get the parameters for KNN Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.logger_object.log('Entered the get_best_params_for_Ensembled_KNN method of the Model_Finder class')
try:
# initializing with different combination of parameters
param_grid_knn = {
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'leaf_size': [10, 17, 24, 28, 30, 35],
'n_neighbors': [4, 5],
'p': [1, 2]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(KNeighborsClassifier(), param_grid_knn, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
algorithm = grid.best_params_['algorithm']
leaf_size = grid.best_params_['leaf_size']
n_neighbors = grid.best_params_['n_neighbors']
p = grid.best_params_['p']
# creating a new model with the best parameters
knn = KNeighborsClassifier(algorithm=algorithm, leaf_size=leaf_size,
n_neighbors=n_neighbors, p=p, n_jobs=-1)
# training the mew model
knn.fit(train_x, train_y)
self.logger_object.log('KNN best params: ' + str(
grid.best_params_) + '. Exited the KNN method of the Model_Finder class')
return knn
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_KNN_fraud_detection.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_KNN(self, train_x, train_y):
"""
Method Name: get_best_params_for_KNN
Description: get the parameters for KNN Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.logger_object.log('Entered the get_best_params_for_Ensembled_KNN method of the Model_Finder class')
try:
# initializing with different combination of parameters
param_grid_knn = {
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'leaf_size': [10, 17, 24, 28, 30, 35],
'n_neighbors': [4, 5, 8, 10, 11],
'p': [1, 2]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(KNeighborsClassifier(), param_grid_knn, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
algorithm = grid.best_params_['algorithm']
leaf_size = grid.best_params_['leaf_size']
n_neighbors = grid.best_params_['n_neighbors']
p = grid.best_params_['p']
# creating a new model with the best parameters
knn = KNeighborsClassifier(algorithm=algorithm, leaf_size=leaf_size,
n_neighbors=n_neighbors, p=p, n_jobs=-1)
# training the mew model
knn.fit(train_x, train_y)
self.logger_object.log('KNN best params: ' + str(
grid.best_params_) + '. Exited the KNN method of the Model_Finder class')
return knn
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_KNN.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_model_thyroid(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for KNN
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
if cluster_no is not None:
title_generator = " Cluster " + cluster_no + " model {}"
else:
title_generator = "Model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
xgboost = self.get_best_params_for_xgboost(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC
else:
y_scores = xgboost.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, xgboost,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title
)
xgboost_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
y_scores = naive_bayes.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, naive_bayes,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title
)
naive_bayes_score = roc_auc_score(test_y, y_scores,
multi_class='ovr') # AUC for Random Forest
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest_thyroid(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
y_scores = random_forest.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, random_forest,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title
)
random_forest_score = roc_auc_score(test_y, y_scores,
multi_class='ovr') # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
y_scores = knn_clf.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, knn_clf,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title
)
knn_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for Random Forest
self.logger_object.log('AUC for KNN' + str(knn_score))
self.score.append(knn_score)
""" 5. SVC """
if len(test_y.unique()) != 1:
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
y_scores = svc_clf.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, svc_clf,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title
)
svc_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for Random Forest
self.logger_object.log('AUC for svc' + str(svc_score))
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_thyroid.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_random_forest_mushroom(self, train_x, train_y):
"""
Method Name: get_best_params_for_random_forest
Description: get the parameters for Random Forest Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"n_estimators": [10, 50, 100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(2, 4, 1), "max_features": ['auto', 'log2']}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, cv=5, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
max_features = grid.best_params_['max_features']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,
max_depth=max_depth, max_features=max_features)
# training the mew model
clf.fit(train_x, train_y)
self.logger_object.log('Random Forest best params: ' + str(
grid.best_params_) + '.Exited the get_best_params_for_random_forest method of the Model_Finder class')
return clf
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_random_forest_mushroom.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_KNN_mushroom(self, train_x, train_y):
"""
Method Name: get_best_params_for_KNN
Description: get the parameters for KNN Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_KNN method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_knn = {
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'leaf_size': [10, 17, 24, 28, 30, 35],
'n_neighbors': [4, 5, 8, 10, 11],
'p': [1, 2]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(KNeighborsClassifier(), param_grid_knn, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
algorithm = grid.best_params_['algorithm']
leaf_size = grid.best_params_['leaf_size']
n_neighbors = grid.best_params_['n_neighbors']
p = grid.best_params_['p']
# creating a new model with the best parameters
knn = KNeighborsClassifier(algorithm=algorithm, leaf_size=leaf_size,
n_neighbors=n_neighbors, p=p, n_jobs=-1)
# training the mew model
knn.fit(train_x, train_y)
self.logger_object.log('KNN best params: ' + str(
grid.best_params_) + '. Exited the KNN method of the Model_Finder class')
return knn
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_KNN_mushroom.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_binary_format_target_value(self, target_column):
try:
column_value = target_column.unique()
target_column = target_column.replace(column_value[0], 0)
target_column = target_column.replace(column_value[1], 1)
return target_column
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
                        self.get_binary_format_target_value.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
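    # Example (sketch): a two-valued target such as pandas.Series(['p', 'e', 'p']) is mapped, in
    # order of first appearance, to pandas.Series([0, 1, 0]).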
def get_best_model_mushroom(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for KNN
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
title_generator = " Cluster " + cluster_no + " model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
xgboost = self.get_best_params_for_xgboost_income_prediction(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest_mushroom(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN_mushroom(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
if len(test_y.unique()) != 1:
""" 5. SVC """
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Cluster " + str(cluster_no) + "Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Model Selection Failed in module [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_mushroom.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def save_accuracy_data(self, model_name, score, execution_model_comparision_id):
try:
accuracy_graph_data = AccurayGraph(project_id=self.project_id,
model_accuracy_dict={'model_name': model_name,
'score': score,
'execution_model_comparision': execution_model_comparision_id,
'training_execution_id': self.logger_object.execution_id}
)
accuracy_graph_data.save_accuracy()
except Exception as e:
model_finder = ModelFinderException(
"save model accuracy [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
                            self.save_accuracy_data.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_svm_fraud_detection_and_scania(self, train_x, train_y):
"""
        Method Name: get_best_params_for_svm_fraud_detection_and_scania
Description: get the parameters for the SVM Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_svm method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"kernel": ['rbf', 'sigmoid'],
"C": [0.1, 0.5, 1.0],
"random_state": [0, 100, 200, 300]}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=SVC(), param_grid=param_grid, cv=5, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
kernel = grid.best_params_['kernel']
C = grid.best_params_['C']
random_state = grid.best_params_['random_state']
# creating a new model with the best parameters
sv_classifier = SVC(kernel=kernel, C=C, random_state=random_state, probability=True)
            # training the new model
sv_classifier.fit(train_x, train_y)
self.logger_object.log('SVM best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_svm method of the Model_Finder class')
return sv_classifier
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_svm_fraud_detection_and_scania.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
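    # --- Illustrative sketch (not from the original project) -----------------
    # The method above tunes an SVC with GridSearchCV and then re-fits a fresh
    # SVC with the winning parameters; probability=True is what later enables
    # predict_proba for the ROC curves. A minimal stand-alone version of the
    # same idea (assuming plain scikit-learn, variable names are ours) could be:
    #
    #     from sklearn.model_selection import GridSearchCV
    #     from sklearn.svm import SVC
    #
    #     param_grid = {"kernel": ["rbf", "sigmoid"], "C": [0.1, 0.5, 1.0]}
    #     grid = GridSearchCV(SVC(probability=True), param_grid, cv=5)
    #     grid.fit(train_x, train_y)
    #     best_svc = grid.best_estimator_  # refit on the full training set by default
    #
    # Because GridSearchCV uses refit=True by default, grid.best_estimator_ is
    # equivalent to re-instantiating SVC with grid.best_params_ and fitting again.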
def get_best_params_for_xgboost_fraud_detection(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
# initializing with different combination of parameters
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
param_grid_xgboost = {
"n_estimators": [100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(8, 10, 1)
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
xgb = XGBClassifier(criterion=criterion, max_depth=max_depth, n_estimators=n_estimators,
n_jobs=-1)
            # training the new model
xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
return xgb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_xgboost_fraud_detection.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
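    # --- Note on the grid above (illustrative, not from the original project) -
    # 'criterion' is not a native XGBClassifier hyperparameter; depending on the
    # xgboost version it is either ignored or produces a "might not be used"
    # warning, so the search above effectively only varies n_estimators and
    # max_depth. A hedged sketch using parameters XGBoost does expose:
    #
    #     from xgboost import XGBClassifier
    #     from sklearn.model_selection import GridSearchCV
    #
    #     params = {"n_estimators": [100, 130],
    #               "max_depth": range(8, 10),
    #               "learning_rate": [0.1, 0.3]}
    #     grid = GridSearchCV(XGBClassifier(objective='binary:logistic'),
    #                         params, cv=5)
    #     grid.fit(train_x, train_y)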
def get_best_model_fraud_detection(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
title_generator = " Cluster " + cluster_no + " model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
xgboost = self.get_best_params_for_xgboost_fraud_detection(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
                self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score))  # Log accuracy
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
            prediction_naive_bayes = naive_bayes.predict(test_x)  # prediction using the Naive Bayes model
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
                naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes)  # AUC for Naive Bayes
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
            random_forest = self.get_best_params_for_random_forest(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN_fraud_detection(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
                knn_score = roc_auc_score(test_y, prediction_knn)  # AUC for KNN
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
if len(test_y.unique()) != 1:
""" 5. SVC """
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_phising_classifier(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
                    svc_score = roc_auc_score(test_y, prediction_svc)  # AUC for SVC
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Cluster " + str(cluster_no) + "Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_fraud_detection.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
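    # --- Scoring note (illustrative, not from the original project) -----------
    # In the selection methods above, roc_auc_score is computed from the hard
    # predict() labels while the saved ROC curves use predict_proba scores. For
    # binary targets, AUC computed from 0/1 labels collapses to the average of
    # sensitivity and specificity, which is coarser than the threshold-free AUC.
    # A small sketch of the two variants (variable names are assumed):
    #
    #     from sklearn.metrics import roc_auc_score
    #
    #     y_proba = clf.predict_proba(test_x)[:, 1]
    #     auc_proba = roc_auc_score(test_y, y_proba)                # threshold-free AUC
    #     auc_labels = roc_auc_score(test_y, clf.predict(test_x))   # label-based, coarser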
def get_best_params_for_naive_bayes_credit_defaulter(self, train_x, train_y):
"""
Method Name: get_best_params_for_naive_bayes
Description: get the parameters for the Naive Bayes's Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_naive_bayes method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"var_smoothing": [1e-9, 0.1, 0.001, 0.5, 0.05, 0.01, 1e-8, 1e-7, 1e-6, 1e-10, 1e-11]}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=GaussianNB(), param_grid=param_grid, cv=3, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
var_smoothing = grid.best_params_['var_smoothing']
# creating a new model with the best parameters
gnb = GaussianNB(var_smoothing=var_smoothing)
            # training the new model
gnb.fit(train_x, train_y)
self.logger_object.log('Naive Bayes best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_naive_bayes method of the Model_Finder class')
return gnb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_naive_bayes_credit_defaulter.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
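    # --- Illustrative sketch (not from the original project) -----------------
    # var_smoothing in GaussianNB adds a fraction of the largest feature
    # variance to every feature's variance for numerical stability. A common way
    # to search it is a log-spaced grid, e.g. (assuming scikit-learn and numpy):
    #
    #     import numpy as np
    #     from sklearn.naive_bayes import GaussianNB
    #     from sklearn.model_selection import GridSearchCV
    #
    #     grid = GridSearchCV(GaussianNB(),
    #                         {"var_smoothing": np.logspace(-11, 0, num=12)},
    #                         cv=3)
    #     grid.fit(train_x, train_y)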
def get_best_params_for_xgboost_credit_defaulter(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_xgboost = {
"n_estimators": [50, 100, 130],
"max_depth": range(3, 11, 1),
"random_state": [0, 50, 100]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,
cv=2, n_jobs=-1)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
random_state = grid.best_params_['random_state']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
xgb = XGBClassifier(random_state=random_state, max_depth=max_depth,
n_estimators=n_estimators, n_jobs=-1)
            # training the new model
xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
return xgb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_xgboost_credit_defaulter.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
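    # --- Alternative search strategy (illustrative, not from the original project)
    # The exhaustive grid above keeps its cost down by using only cv=2 folds. A
    # hedged alternative that keeps more folds but samples the grid instead of
    # enumerating it is RandomizedSearchCV (names below are ours):
    #
    #     from sklearn.model_selection import RandomizedSearchCV
    #     from xgboost import XGBClassifier
    #
    #     search = RandomizedSearchCV(XGBClassifier(objective='binary:logistic'),
    #                                 param_distributions={"n_estimators": [50, 100, 130],
    #                                                      "max_depth": range(3, 11),
    #                                                      "random_state": [0, 50, 100]},
    #                                 n_iter=10, cv=5, n_jobs=-1, random_state=0)
    #     search.fit(train_x, train_y)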
def get_best_model_credit_deaulter(self, train_x, train_y, test_x, test_y, cluster_no):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
title_generator = " Cluster " + cluster_no + " model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
xgboost = self.get_best_params_for_xgboost_credit_defaulter(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
                self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score))  # Log accuracy
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes_credit_defaulter(train_x, train_y)
            prediction_naive_bayes = naive_bayes.predict(test_x)  # prediction using the Naive Bayes model
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
                naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes)  # AUC for Naive Bayes
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
                knn_score = roc_auc_score(test_y, prediction_knn)  # AUC for KNN
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
if len(test_y.unique()) != 1:
""" 5. SVC """
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_phising_classifier(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
                    svc_score = roc_auc_score(test_y, prediction_svc)  # AUC for SVC
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Cluster " + str(cluster_no) + "Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_credit_deaulter.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
"""phishing classifier"""
def get_best_params_for_svm_phising_classifier(self, train_x, train_y):
"""
        Method Name: get_best_params_for_svm_phising_classifier
Description: get the parameters for the SVM Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_svm method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"kernel": ['rbf', 'sigmoid'],
"C": [0.1, 0.5, 1.0],
"random_state": [0, 100, 200, 300]}
# Creating an object of the Grid Search class
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=5, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
kernel = grid.best_params_['kernel']
c = grid.best_params_['C']
random_state = grid.best_params_['random_state']
# creating a new model with the best parameters
sv_classifier = SVC(kernel=kernel, C=c, random_state=random_state, probability=True)
            # training the new model
sv_classifier.fit(train_x, train_y)
self.logger_object.log('SVM best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_svm method of the Model_Finder class')
return sv_classifier
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_svm_phising_classifier.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
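    # --- Note on probability=True (illustrative, not from the original project)
    # probability=True makes SVC expose predict_proba via internal (Platt-style)
    # calibration, which adds an extra cross-validated fit. If only a ROC curve
    # is needed, decision_function scores can be used instead; here fitted_svc
    # stands for the estimator returned by the method above:
    #
    #     from sklearn.metrics import roc_curve
    #
    #     scores = fitted_svc.decision_function(test_x)   # signed margins
    #     fpr, tpr, thresholds = roc_curve(test_y, scores, pos_label=test_y.unique()[1])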
def get_best_params_for_xgboost_phising_classifier(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_xgboost = {
"n_estimators": [100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(8, 10, 1)
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
xgb = XGBClassifier(criterion=criterion, max_depth=max_depth, n_estimators=n_estimators,
n_jobs=-1)
            # training the new model
xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
return xgb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_xgboost_phising_classifier.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_model_phising_classifier(self, train_x, train_y, test_x, test_y, cluster_no):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
title_generator = " Cluster " + cluster_no + " model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
xgboost = self.get_best_params_for_xgboost_phising_classifier(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
                self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score))  # Log accuracy
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
            prediction_naive_bayes = naive_bayes.predict(test_x)  # prediction using the Naive Bayes model
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
                naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes)  # AUC for Naive Bayes
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
                knn_score = roc_auc_score(test_y, prediction_knn)  # AUC for KNN
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
if len(test_y.unique()) != 1:
""" 5. SVC """
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_phising_classifier(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
                    svc_score = roc_auc_score(test_y, prediction_svc)  # AUC for SVC
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Cluster " + str(cluster_no) + "Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_phising_classifier.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
"""Forest cover classifier """
def get_best_params_for_random_forest_forest_cover_clf(self, train_x, train_y):
"""
Method Name: get_best_params_for_random_forest
Description: get the parameters for Random Forest Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"n_estimators": [10, 50, 100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(2, 4, 1), "max_features": ['auto', 'log2']}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, cv=5, verbose=3, n_jobs=-1)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
max_features = grid.best_params_['max_features']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,
max_depth=max_depth, max_features=max_features)
            # training the new model
clf.fit(train_x, train_y)
self.logger_object.log('Random Forest best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_random_forest method of the Model_Finder class')
return clf
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_random_forest_forest_cover_clf.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
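    # --- Version note (illustrative, not from the original project) -----------
    # max_features='auto' was the historical default for RandomForestClassifier
    # (equivalent to 'sqrt'); newer scikit-learn releases deprecate and then
    # remove the 'auto' alias. A version-robust sketch of the same grid:
    #
    #     from sklearn.ensemble import RandomForestClassifier
    #     from sklearn.model_selection import GridSearchCV
    #
    #     param_grid = {"n_estimators": [10, 50, 100, 130],
    #                   "criterion": ["gini", "entropy"],
    #                   "max_depth": range(2, 4),
    #                   "max_features": ["sqrt", "log2"]}
    #     grid = GridSearchCV(RandomForestClassifier(), param_grid, cv=5, n_jobs=-1)
    #     grid.fit(train_x, train_y)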
def get_best_params_for_xgboost_forest_cover_clf(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_xgboost = {
'learning_rate': [0.5, 0.1, 0.01, 0.001],
'max_depth': [3, 5, 10, 20],
'n_estimators': [10, 50, 100, 200]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBClassifier(objective='multi:softprob'), param_grid_xgboost, verbose=3, cv=5,
n_jobs=-1)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
learning_rate = grid.best_params_['learning_rate']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
xgb = XGBClassifier(learning_rate=learning_rate, max_depth=max_depth, n_estimators=n_estimators)
            # training the new model
xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
return xgb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_xgboost_forest_cover_clf.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
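    # --- Multiclass scoring sketch (illustrative, not from the original project)
    # With objective='multi:softprob' the classifier returns one probability per
    # class, which is the shape roc_auc_score expects for multi_class='ovr' in
    # the forest-cover selection method below. Minimal sketch (xgb stands for the
    # estimator returned above):
    #
    #     from sklearn.metrics import roc_auc_score
    #
    #     y_scores = xgb.predict_proba(test_x)               # shape (n_samples, n_classes)
    #     auc_ovr = roc_auc_score(test_y, y_scores, multi_class='ovr')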
def get_best_model_forest_cover(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
if cluster_no is not None:
title_generator = " Cluster " + cluster_no + " model {}"
else:
title_generator = "Model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
            xgboost = self.get_best_params_for_xgboost_forest_cover_clf(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
                self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score))  # Log accuracy
else:
y_scores = xgboost.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, xgboost,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title="XGBoost ROC curve"
)
xgboost_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
            prediction_naive_bayes = naive_bayes.predict(test_x)  # prediction using the Naive Bayes model
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
y_scores = naive_bayes.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, naive_bayes,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title + self.model_name[-1]
)
naive_bayes_score = roc_auc_score(test_y, y_scores,
                                                  multi_class='ovr')  # AUC for Naive Bayes
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
            random_forest = self.get_best_params_for_random_forest_forest_cover_clf(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
y_scores = random_forest.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, random_forest,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title + self.model_name[-1]
)
random_forest_score = roc_auc_score(test_y, y_scores,
multi_class='ovr') # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
y_scores = knn_clf.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, knn_clf,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title + self.model_name[-1]
)
                knn_score = roc_auc_score(test_y, y_scores, multi_class='ovr')  # AUC for KNN
self.logger_object.log('AUC for KNN' + str(knn_score))
self.score.append(knn_score)
""" 5. SVC """
if len(test_y.unique()) != 1:
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
y_scores = svc_clf.predict_proba(test_x)
AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, svc_clf,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title + self.model_name[-1]
)
                    svc_score = roc_auc_score(test_y, y_scores, multi_class='ovr')  # AUC for SVC
self.logger_object.log('AUC for svc' + str(svc_score))
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
# comparing the two models
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_forest_cover.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
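    # --- One-vs-rest ROC sketch (illustrative; assumes this is roughly what
    # AccurayGraph.save_plot_multiclass_roc_curve does internally) -------------
    # Per-class ROC curves for a multiclass problem are usually drawn by
    # binarizing the labels and scoring each class column separately:
    #
    #     from sklearn.preprocessing import label_binarize
    #     from sklearn.metrics import roc_curve, auc
    #
    #     classes = sorted(test_y.unique())
    #     y_bin = label_binarize(test_y, classes=classes)
    #     for idx, cls in enumerate(classes):
    #         fpr, tpr, _ = roc_curve(y_bin[:, idx], y_scores[:, idx])
    #         print(cls, auc(fpr, tpr))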
def get_best_model_scania_truck(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
if cluster_no is not None:
title_generator = " Cluster " + cluster_no + " model {}"
else:
title_generator = "Model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
xgboost = self.get_best_params_for_xgboost(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
                self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score))  # Log accuracy
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
"""
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
"""
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
"""
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
5. SVC
if len(test_y.unique()) != 1:
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
"""
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
# comparing the two models
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
                            self.get_best_model_scania_truck.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_Random_Forest_Regressor(self, train_x, train_y):
"""
Method Name: get_best_params_for_Random_Forest_Regressor
Description: get the parameters for Random_Forest_Regressor Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the RandomForestReg method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_random_forest_tree = {
"n_estimators": [10, 20, 30],
"max_features": ["auto", "sqrt", "log2"],
"min_samples_split": [2, 4, 8],
"bootstrap": [True, False]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(RandomForestRegressor(), param_grid_random_forest_tree, verbose=3, cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
n_estimators = grid.best_params_['n_estimators']
max_features = grid.best_params_['max_features']
min_samples_split = grid.best_params_['min_samples_split']
bootstrap = grid.best_params_['bootstrap']
# creating a new model with the best parameters
random_forest_reg = RandomForestRegressor(n_estimators=n_estimators,
max_features=max_features,
min_samples_split=min_samples_split,
bootstrap=bootstrap)
            # training the new model
random_forest_reg.fit(train_x, train_y)
self.logger_object.log('RandomForestReg best params: ' + str(
grid.best_params_) + '. Exited the RandomForestReg method of the Model_Finder class')
return random_forest_reg
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_Random_Forest_Regressor.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_params_for_linearReg(self, train_x, train_y):
"""
Method Name: get_best_params_for_linearReg
Description: get the parameters for LinearReg Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_linearReg method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_linear_reg = {
'fit_intercept': [True, False], 'normalize': [True, False], 'copy_X': [True, False]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(LinearRegression(), param_grid_linear_reg, verbose=3, cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
fit_intercept = grid.best_params_['fit_intercept']
normalize = grid.best_params_['normalize']
copy_x = grid.best_params_['copy_X']
# creating a new model with the best parameters
lin_reg = LinearRegression(fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_x)
            # training the new model
lin_reg.fit(train_x, train_y)
self.logger_object.log('LinearRegression best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_linearReg method of the Model_Finder class')
return lin_reg
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_linearReg.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
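    # --- Version note (illustrative, not from the original project) -----------
    # The 'normalize' argument of LinearRegression is deprecated in newer
    # scikit-learn releases and eventually removed; the recommended replacement
    # is explicit scaling in a Pipeline (not numerically identical, but the
    # supported route):
    #
    #     from sklearn.pipeline import make_pipeline
    #     from sklearn.preprocessing import StandardScaler
    #     from sklearn.linear_model import LinearRegression
    #
    #     model = make_pipeline(StandardScaler(), LinearRegression(fit_intercept=True))
    #     model.fit(train_x, train_y)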
def get_best_model_for_reg(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
        Description: Find out the Model which has the best R2 score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
title = "Cluster {} ".format(cluster_no) if cluster_no is not None else ''
# Linear Regression Training
self.model_name.append("Linear_Regression")
linear_reg = self.get_best_params_for_linearReg(train_x, train_y)
prediction_linear_reg = linear_reg.predict(test_x) # Predictions using the LinearReg Model
linear_reg_error = r2_score(test_y, prediction_linear_reg)
self.model.append(linear_reg)
self.score.append(linear_reg_error)
# Decision Tree training
self.model_name.append('Decision_Tree')
decision_tree_reg = self.get_best_params_for_decision_tree_regressor(train_x, train_y)
self.model.append(decision_tree_reg)
prediction_decision_tree_reg = decision_tree_reg.predict(
test_x) # Predictions using the decisionTreeReg Model
decision_tree_reg_error = r2_score(test_y, prediction_decision_tree_reg)
self.score.append(decision_tree_reg_error)
self.logger_object.log("Decision tree regression r2 score {}".format(decision_tree_reg_error))
# create best model for XGBoost
self.model_name.append('XG_BOOST')
xgboost = self.get_best_params_for_xgboost_regressor(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
self.model.append(xgboost)
prediction_xgboost_error = r2_score(test_y, prediction_xgboost)
self.logger_object.log("XGBoost regression r2 score {}".format(prediction_xgboost_error))
self.score.append(prediction_xgboost_error)
self.model_name.append("Random_Forest")
random_forest_reg = self.get_best_params_for_Random_Forest_Regressor(train_x, train_y)
self.model.append(random_forest_reg)
prediction_random_forest_reg = random_forest_reg.predict(test_x)
prediction_random_forest_error = r2_score(test_y, prediction_random_forest_reg)
self.score.append(prediction_random_forest_error)
self.logger_object.log("Random Forest regression r2 score {}".format(prediction_random_forest_error))
self.model_name.append("SVR")
sv_reg = self.get_best_params_for_support_vector_regressor(train_x, train_y)
self.model.append(sv_reg)
prediction_sv_reg = sv_reg.predict(test_x)
prediction_sv_reg_error = r2_score(test_y, prediction_sv_reg)
self.score.append(prediction_sv_reg_error)
self.logger_object.log("Support vector regression r2 score {}".format(prediction_sv_reg_error))
"""
Visualization begin based on above model
"""
prediction_value = [prediction_linear_reg,
prediction_decision_tree_reg,
prediction_xgboost,
prediction_random_forest_reg,
prediction_sv_reg]
for data in zip(self.model_name, prediction_value):
AccurayGraph().save_scatter_plot(x_axis_data=test_y, y_axis_data=data[1],
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="True Target values", y_label="Predicted Target value",
title=title + "Predicted vs True " + data[0])
AccurayGraph().save_distribution_plot(data=numpy.abs(test_y - data[1]),
label="Residual distribution plot",
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Error ",
y_label="frequency or occurance",
title=title + "{} residual distribution plot".format(data[0])
)
mean_abs_error = []
for data in prediction_value:
mean_abs_error.append(numpy.mean(numpy.abs(test_y - data)))
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=mean_abs_error,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="MAE comparison between {}".format(self.model_name),
title=title + "Mean Absolute error "
)
# saving accuracy data based on model on mongo db
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_for_reg.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
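    # --- Metric note (illustrative, not from the original project) ------------
    # The regression selector above ranks models by r2_score, but the bar chart
    # it saves shows mean absolute error, so the tallest bar corresponds to the
    # worst model on that axis. Computing both explicitly (assumed names):
    #
    #     from sklearn.metrics import r2_score, mean_absolute_error
    #
    #     r2 = r2_score(test_y, predictions)
    #     mae = mean_absolute_error(test_y, predictions)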
def get_best_params_for_decision_tree_regressor(self, train_x, train_y):
"""
Method Name: get_best_params_for_DecisionTreeRegressor
Description: get the parameters for DecisionTreeRegressor Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log(
'Entered the get_best_params_for_DecisionTreeRegressor method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_decision_tree = {"criterion": ["mse", "friedman_mse", "mae"],
"splitter": ["best", "random"],
"max_features": ["auto", "sqrt", "log2"],
'max_depth': range(2, 16, 2),
'min_samples_split': range(2, 16, 2)
}
# Creating an object of the Grid Search class
grid = GridSearchCV(DecisionTreeRegressor(), param_grid_decision_tree, verbose=3, cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
splitter = grid.best_params_['splitter']
max_features = grid.best_params_['max_features']
max_depth = grid.best_params_['max_depth']
min_samples_split = grid.best_params_['min_samples_split']
# creating a new model with the best parameters
decision_tree_reg = DecisionTreeRegressor(criterion=criterion, splitter=splitter,
max_features=max_features, max_depth=max_depth,
min_samples_split=min_samples_split)
            # training the new model
decision_tree_reg.fit(train_x, train_y)
            self.logger_object.log('Decision Tree regressor best params: ' + str(
                grid.best_params_) + '. Exited the get_best_params_for_decision_tree_regressor method of the Model_Finder class')
return decision_tree_reg
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_decision_tree_regressor.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
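    # --- Version note (illustrative, not from the original project) -----------
    # DecisionTreeRegressor's criteria "mse" and "mae" are renamed to
    # "squared_error" and "absolute_error" in newer scikit-learn releases, so the
    # grid above only runs as-is on older versions. A version-tolerant sketch:
    #
    #     from sklearn.tree import DecisionTreeRegressor
    #     from sklearn.model_selection import GridSearchCV
    #
    #     param_grid = {"criterion": ["squared_error", "friedman_mse", "absolute_error"],
    #                   "splitter": ["best", "random"],
    #                   "max_depth": range(2, 16, 2)}
    #     grid = GridSearchCV(DecisionTreeRegressor(), param_grid, cv=5)
    #     grid.fit(train_x, train_y)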
def get_best_params_for_xgboost_regressor(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_xgboost = {
'learning_rate': [0.5, 0.1, 0.01, 0.001],
'max_depth': [3, 5, 10, 20],
'n_estimators': [10, 50, 100, 200]
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBRegressor(objective='reg:squarederror'), param_grid_xgboost, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
learning_rate = grid.best_params_['learning_rate']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters objective='reg:linear'
xgb = XGBRegressor(objective='reg:squarederror', learning_rate=learning_rate,
max_depth=max_depth,
n_estimators=n_estimators)
            # training the new model
xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
return xgb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_xgboost_regressor.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
def get_best_model_zomato_or_fitbit_or_climate_visibility(self, train_x, train_y, test_x, test_y, cluster_no=None):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
        # create best model for Decision Tree
try:
title = "Cluster {} ".format(cluster_no) if cluster_no is not None else ''
self.model_name.append('Decision_Tree')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
decision_tree_reg = self.get_best_params_for_decision_tree_regressor(train_x, train_y)
self.model.append(decision_tree_reg)
prediction_decision_tree_reg = decision_tree_reg.predict(
test_x) # Predictions using the decisionTreeReg Model
decision_tree_reg_error = r2_score(test_y, prediction_decision_tree_reg)
self.score.append(decision_tree_reg_error)
self.logger_object.log("Decision tree regression r2 score {}".format(decision_tree_reg_error))
# create best model for XGBoost
self.model_name.append('XG_BOOST')
xgboost = self.get_best_params_for_xgboost_regressor(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
self.model.append(xgboost)
prediction_xgboost_error = r2_score(test_y, prediction_xgboost)
self.logger_object.log("XGBoost regression r2 score {}".format(prediction_xgboost_error))
self.score.append(prediction_xgboost_error)
self.model_name.append('RIDGE_REG')
ridge_regression = self.get_best_params_for_ridge_regression(train_x, train_y)
self.model.append(ridge_regression)
prediction_ridge_regression = ridge_regression.predict(test_x)
prediction_ridge_error = r2_score(test_y, prediction_ridge_regression)
self.score.append(prediction_ridge_error)
self.logger_object.log("RIDGE_REG regression r2 score {}".format(prediction_ridge_error))
self.model_name.append("Random_Forest")
random_forest_reg = self.get_best_params_for_Random_Forest_Regressor(train_x, train_y)
self.model.append(random_forest_reg)
prediction_random_forest_reg = random_forest_reg.predict(test_x)
prediction_random_forest_error = r2_score(test_y, prediction_random_forest_reg)
self.score.append(prediction_random_forest_error)
self.logger_object.log("Random Forest regression r2 score {}".format(prediction_ridge_error))
self.model_name.append("SVR")
sv_reg = self.get_best_params_for_support_vector_regressor(train_x, train_y)
self.model.append(sv_reg)
prediction_sv_reg = sv_reg.predict(test_x)
prediction_sv_reg_error = r2_score(test_y, prediction_sv_reg)
self.score.append(prediction_sv_reg_error)
self.logger_object.log("Support vector regression r2 score {}".format(prediction_ridge_error))
"""
            Visualization based on the models above
"""
prediction_value = [prediction_decision_tree_reg,
prediction_xgboost,
prediction_ridge_regression,
prediction_random_forest_reg,
prediction_sv_reg]
for data in zip(self.model_name, prediction_value):
AccurayGraph().save_scatter_plot(x_axis_data=test_y, y_axis_data=data[1],
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="True Target values", y_label="Predicted Target value",
title=title + "Predicted vs True " + data[0])
AccurayGraph().save_distribution_plot(data=numpy.abs(test_y - data[1]),
label="Residual distribution plot",
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Error ",
y_label="frequency or occurrence",
title=title + "{} residual distribution plot".format(data[0])
)
mean_abs_error = []
for data in prediction_value:
mean_abs_error.append(numpy.mean(numpy.abs(test_y - data)))
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=mean_abs_error,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="MAE comparison between {}".format(self.model_name),
title=title + "Mean Absolute error "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_zomato_or_fitbit_or_climate_visibility.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
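    # Illustrative usage sketch (hypothetical, not from the original project): assuming a
    # ModelFinder instance and a feature/target split already exist, the regression selector
    # above returns the winning (name, model) pair.
    #
    #     from sklearn.model_selection import train_test_split
    #     train_x, test_x, train_y, test_y = train_test_split(features, target, test_size=0.2)
    #     best_name, best_model = model_finder.get_best_model_zomato_or_fitbit_or_climate_visibility(
    #         train_x, train_y, test_x, test_y, cluster_no=0)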
def get_best_params_for_naive_bayes(self, train_x, train_y):
"""
Method Name: get_best_params_for_naive_bayes
Description: get the parameters for the Naive Bayes's Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_naive_bayes method of the Model_Finder class')
# initializing with different combination of parameters
param_grid = {"var_smoothing": [1e-9, 0.1, 0.001, 0.5, 0.05, 0.01, 1e-8, 1e-7, 1e-6, 1e-10, 1e-11]}
# Creating an object of the Grid Search class
grid = GridSearchCV(estimator=self.gnb, param_grid=param_grid, cv=5, verbose=3)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
var_smoothing = grid.best_params_['var_smoothing']
# creating a new model with the best parameters
gnb = GaussianNB(var_smoothing=var_smoothing)
            # training the new model
gnb.fit(train_x, train_y)
self.logger_object.log('Naive Bayes best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_naive_bayes method of the Model_Finder class')
return gnb
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]".format(self.__module__, ModelFinder.__name__,
self.get_best_params_for_naive_bayes.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
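    # Editorial note (sketch, not original code): GaussianNB's var_smoothing adds a fraction
    # of the largest feature variance to every per-feature variance for numerical stability,
    # so it is usually searched on a log scale rather than with hand-picked values, e.g.:
    #
    #     import numpy as np
    #     param_grid = {"var_smoothing": np.logspace(-11, 0, num=12)}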
def get_best_params_for_xgboost_income_prediction(self, train_x, train_y):
"""
Method Name: get_best_params_for_xgboost
Description: get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output: The model with the best parameters
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
# initializing with different combination of parameters
param_grid_xgboost = {
"n_estimators": [100, 130], "criterion": ['gini', 'entropy'],
"max_depth": range(8, 10, 1)
}
# Creating an object of the Grid Search class
grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,
cv=5)
# finding the best parameters
grid.fit(train_x, train_y)
# extracting the best parameters
criterion = grid.best_params_['criterion']
max_depth = grid.best_params_['max_depth']
n_estimators = grid.best_params_['n_estimators']
# creating a new model with the best parameters
xgb = XGBClassifier(criterion=criterion, max_depth=max_depth, n_estimators=n_estimators,
n_jobs=-1)
            # training the new model
xgb.fit(train_x, train_y)
self.logger_object.log('XGBoost best params: ' + str(
grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
return xgb
except Exception as e:
model_finder = ModelFinderException("Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__,
ModelFinder.__name__,
self.get_best_params_for_xgboost_income_prediction.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
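    # Editorial sketch (assumption, not original code): GridSearchCV optimises the classifier's
    # default score (plain accuracy) unless told otherwise, while the selection logic below ranks
    # the classifiers by ROC AUC; passing a scoring argument keeps the two consistent.
    #
    #     grid = GridSearchCV(XGBClassifier(objective='binary:logistic'),
    #                         param_grid_xgboost, scoring='roc_auc', verbose=3, cv=5)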
def get_best_model_income_prediction(self, train_x, train_y, test_x, test_y, cluster_number):
"""
Method Name: get_best_model
Description: Find out the Model which has the best AUC score.
Output: The best model name and the model object
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# create best model for XGBoost
try:
title_generator = " Cluster " + cluster_number + " model {}"
# XG Boost model
self.model_name.append('XG_BOOST')
title = title_generator.format('XG_BOOST')
self.logger_object.log('Entered the get_best_model method of the Model_Finder class')
xgboost = self.get_best_params_for_xgboost_income_prediction(train_x, train_y)
prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model
if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We
# will use accuracy in that case
xgboost_score = accuracy_score(test_y, prediction_xgboost)
                self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log accuracy
else:
xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost
self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC
y_score = xgboost.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.model.append(xgboost)
self.score.append(xgboost_score)
# create best model for naive bayes
self.model_name.append('NAIVE_BAYES')
title = title_generator.format('NAIVE_BAYES')
naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)
            prediction_naive_bayes = naive_bayes.predict(test_x) # predictions using the Naive Bayes model
self.model.append(naive_bayes)
if len(test_y.unique()) == 1: # if there is only one label in y,
# then roc_auc_score returns error. We will use accuracy in that case
naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)
self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))
else:
                naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Naive Bayes
self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))
y_score = naive_bayes.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(naive_bayes_score)
# create best model for Random forest
self.model_name.append('Random_Forest')
title = title_generator.format('Random_Forest')
random_forest = self.get_best_params_for_random_forest(train_x, train_y)
prediction_random_forest = random_forest.predict(test_x)
self.model.append(random_forest)
if len(test_y.unique()) == 1:
random_forest_score = accuracy_score(test_y, prediction_random_forest)
self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))
else:
random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest
self.logger_object.log('AUC for Random Forest' + str(random_forest_score))
y_score = random_forest.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(random_forest_score)
# create best model for KNN
self.model_name.append('KNN')
title = title_generator.format('KNN')
knn_clf = self.get_best_params_for_KNN(train_x, train_y)
prediction_knn = knn_clf.predict(test_x)
self.model.append(knn_clf)
if len(test_y.unique()) == 1:
knn_score = accuracy_score(test_y, prediction_knn)
self.logger_object.log('Accuracy for KNN clf' + str(knn_score))
else:
                knn_score = roc_auc_score(test_y, prediction_knn) # AUC for KNN
self.logger_object.log('AUC for KNN' + str(knn_score))
y_score = knn_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(knn_score)
if len(test_y.unique()) != 1:
""" 5. SVC """
self.model_name.append("SVC")
title = title_generator.format("SVC")
svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)
prediction_svc = svc_clf.predict(test_x)
self.model.append(svc_clf)
if len(test_y.unique()) == 1:
svc_score = accuracy_score(test_y, prediction_svc)
self.logger_object.log('Accuracy for svc clf' + str(svc_score))
else:
                    svc_score = roc_auc_score(test_y, prediction_svc) # AUC for SVC
self.logger_object.log('AUC for svc' + str(svc_score))
y_score = svc_clf.predict_proba(test_x)[:, 1]
fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])
AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
title=title)
self.score.append(svc_score)
AccurayGraph().save_accuracy_bar_graph(
model_name_list=self.model_name,
accuracy_score_list=self.score,
project_id=self.project_id,
execution_id=self.logger_object.execution_id,
file_object=self.file_object,
x_label="Model List",
y_label="Accuracy score comparison {}".format(self.model_name),
title="Cluster " + str(cluster_number) + "Accuracy Score "
)
execution_model_comparison_id = str(uuid.uuid4())
for data in zip(self.model_name, self.score):
self.save_accuracy_data(model_name=data[0], score=data[1],
execution_model_comparision_id=execution_model_comparison_id)
            # comparing all trained models
return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]"
.format(self.__module__, ModelFinder.__name__,
self.get_best_model_income_prediction.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
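    # Editorial sketch (assumption, not original code): the AUC values above are computed from
    # hard class predictions; scoring the predicted probabilities instead usually gives a more
    # informative AUC and matches the ROC curves that are plotted from predict_proba.
    #
    #     probabilities = xgboost.predict_proba(test_x)[:, 1]
    #     xgboost_score = roc_auc_score(test_y, probabilities)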
def get_best_model_on_score(self, model_name: list, model: list, score: list):
"""
:param model: models in list
:param model_name: Model name list
:param score: score list
:return: best model name and model
"""
try:
record = {'model_name': model_name, 'model': model, 'score': score}
df = pandas.DataFrame(record)
df.index = df.model_name
            # the frame is indexed by model_name, so idxmax on the score column returns the best model's name
            model_name = df['score'].idxmax()
            model = df.loc[model_name]['model']
return model_name, model
except Exception as e:
model_finder = ModelFinderException(
"Failed in [{0}] class [{1}] method [{2}]".format(self.__module__, ModelFinder.__name__,
self.get_best_model_on_score.__name__))
raise Exception(model_finder.error_message_detail(str(e), sys)) from e
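    # Illustrative sketch (editorial addition, not part of the original class): with the
    # score-based selection above, the highest score wins regardless of list order.
    #
    #     names = ['XG_BOOST', 'NAIVE_BAYES', 'Random_Forest']
    #     models = [xgb_model, nb_model, rf_model]   # hypothetical fitted models
    #     scores = [0.91, 0.84, 0.93]
    #     best_name, best_model = self.get_best_model_on_score(model_name=names, model=models, score=scores)
    #     # best_name == 'Random_Forest'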
| 55.377338 | 149 | 0.563362 |
f7552795b727ff23482fa68a2ce214af22fbac6f | 860 | py | Python | tests/data/dependencies_skill/__init__.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | tests/data/dependencies_skill/__init__.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | tests/data/dependencies_skill/__init__.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains a skill to test dependencies format."""
| 40.952381 | 80 | 0.587209 |
f75534b57eb06254890ba1602ef00ae8f358488b | 1,008 | py | Python | src/data_collection/MatthiasWinkelmann_firstname_database.py | diabolical-ninja/AllTheNames | cdf8a181b80ee3250b76f30cd0b875368d60570c | [
"MIT"
] | null | null | null | src/data_collection/MatthiasWinkelmann_firstname_database.py | diabolical-ninja/AllTheNames | cdf8a181b80ee3250b76f30cd0b875368d60570c | [
"MIT"
] | null | null | null | src/data_collection/MatthiasWinkelmann_firstname_database.py | diabolical-ninja/AllTheNames | cdf8a181b80ee3250b76f30cd0b875368d60570c | [
"MIT"
] | null | null | null | """Firstnames Database from Github User MatthiasWinkelmann.
Source:
- https://github.com/MatthiasWinkelmann/firstname-database
"""
import sys
from pathlib import Path
import pandas as pd
sys.path.append(str(Path(__file__).parent.parent))
import utils as ut # noqa
names_url = "https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv" # noqa
names_df = pd.read_csv(names_url, sep=";")
# Original format is wide, with a column for each country. Normalise
names_df = pd.melt(names_df, id_vars=["name", "gender"])
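# Editorial sketch (illustrative, not part of the original script): pd.melt turns the
# one-column-per-country layout into one row per (name, gender, country) triple, e.g.
#
#     name   gender   variable   value
#     Anna   F        Germany    3
#     Anna   F        Italy      NaN
#
# where "variable" holds the former column name (the country) and "value" its cell.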
# Remap column names
colnames_dict = {
"name": "first_name",
"gender": "gender",
"variable": "origin",
}
names_df = names_df[list(colnames_dict.keys())]
names_df.rename(columns=colnames_dict, inplace=True)
names_df["gender"] = names_df["gender"].apply(ut.remap_gender)
names_df["definition"] = pd.NA
# Save
names_df.to_csv(
"data/MatthiasWinkelmann_firstname_database.csv",
sep="|",
index=False,
encoding="utf-8",
)
| 24.585366 | 115 | 0.729167 |
f75541943a7c2c9b40e140af66050c9c3fd9094b | 1,760 | py | Python | models/encoder_cnn.py | sarrouti/VQG | eb9cbe3ba4f75d85fc55f5f1e746b1f2190f0b2b | [
"MIT"
] | 9 | 2020-08-08T17:48:08.000Z | 2022-03-06T06:51:50.000Z | models/encoder_cnn.py | sarrouti/VQG | eb9cbe3ba4f75d85fc55f5f1e746b1f2190f0b2b | [
"MIT"
] | 2 | 2020-07-04T03:08:36.000Z | 2021-03-22T09:16:21.000Z | models/encoder_cnn.py | sarrouti/VQG | eb9cbe3ba4f75d85fc55f5f1e746b1f2190f0b2b | [
"MIT"
] | 2 | 2020-07-24T02:29:42.000Z | 2021-11-21T20:02:22.000Z |
"""
Created on Tue Jun 23 20:15:11 2020
@author: sarroutim2
"""
"""Genearates a representation for an image input.
"""
import torch.nn as nn
import torch
import torchvision.models as models
class EncoderCNN(nn.Module):
"""Generates a representation for an image input.
"""
def __init__(self, output_size):
"""Load the pretrained ResNet-152 and replace top fc layer.
"""
super(EncoderCNN, self).__init__()
self.cnn = models.resnet50(pretrained=True)#resnet18
for param in self.cnn.parameters():
param.requires_grad = False
self.cnn.fc = nn.Linear(self.cnn.fc.in_features, output_size)
self.bn = nn.BatchNorm1d(output_size, momentum=0.01)
self.init_weights()
"""
super(EncoderCNN, self).__init__()
self.cnn = models.googlenet(pretrained=True)#resnet18
for param in self.cnn.parameters():
param.requires_grad = False
num_features = self.cnn.classifier[6].in_features
features = list(self.cnn.classifier.children())[:-1]
features.extend([nn.Linear(num_features, 512)])
self.cnn.classifier=nn.Sequential(*features)
#self.cnn.fc=nn.Sequential(*features)
self.cnn.fc = nn.Linear(512, output_size)
#self.cnn.classifier = nn.Sequential(*features)
self.bn = nn.BatchNorm1d(output_size, momentum=0.01)
self.init_weights()"""
def init_weights(self):
"""Initialize the weights.
"""
self.cnn.fc.weight.data.normal_(0.0, 0.02)
self.cnn.fc.bias.data.fill_(0)
def forward(self, images):
"""Extract the image feature vectors.
"""
features = self.cnn(images)
output = self.bn(features)
return output
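# Illustrative usage sketch (editorial addition, not part of the original file): the encoder
# maps a batch of preprocessed RGB images to `output_size`-dimensional features; note that
# BatchNorm1d needs a batch size larger than 1 while in training mode.
#
#     encoder = EncoderCNN(output_size=512)
#     images = torch.randn(8, 3, 224, 224)   # hypothetical preprocessed batch
#     features = encoder(images)             # shape: (8, 512)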
| 30.877193 | 69 | 0.638636 |
f755629790bb8cde966f4ceb680ec5a084e18788 | 9,889 | py | Python | custom_components/hacs/hacsbase/data.py | Lucstricke/integration | 1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5 | [
"MIT"
] | 1 | 2021-12-12T18:19:48.000Z | 2021-12-12T18:19:48.000Z | custom_components/hacs/hacsbase/data.py | Lucstricke/integration | 1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5 | [
"MIT"
] | null | null | null | custom_components/hacs/hacsbase/data.py | Lucstricke/integration | 1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5 | [
"MIT"
] | null | null | null | """Data handler for HACS."""
import asyncio
import os
from homeassistant.core import callback
from custom_components.hacs.helpers.classes.manifest import HacsManifest
from custom_components.hacs.helpers.functions.register_repository import (
register_repository,
)
from custom_components.hacs.helpers.functions.store import (
async_load_from_store,
async_save_to_store,
async_save_to_store_default_encoder,
get_store_for_key,
)
from custom_components.hacs.share import get_hacs
from custom_components.hacs.utils.logger import getLogger
def update_repository_from_storage(repository, storage_data):
"""Merge in data from storage into the repo data."""
repository.data.memorize_storage(storage_data)
repository.data.update_data(storage_data)
if repository.data.installed:
return
repository.logger.debug("%s Should be installed but is not... Fixing that!", repository)
repository.data.installed = True
class HacsData:
"""HacsData class."""
def __init__(self):
"""Initialize."""
self.logger = getLogger()
self.hacs = get_hacs()
self.content = {}
async def async_write(self):
"""Write content to the store files."""
if self.hacs.status.background_task or self.hacs.system.disabled:
return
self.logger.debug("Saving data")
# Hacs
await async_save_to_store(
self.hacs.hass,
"hacs",
{
"view": self.hacs.configuration.frontend_mode,
"compact": self.hacs.configuration.frontend_compact,
"onboarding_done": self.hacs.configuration.onboarding_done,
"archived_repositories": self.hacs.common.archived_repositories,
"renamed_repositories": self.hacs.common.renamed_repositories,
},
)
await self._async_store_content_and_repos()
for event in ("hacs/repository", "hacs/config"):
self.hacs.hass.bus.async_fire(event, {})
async def _async_store_content_and_repos(self): # bb: ignore
"""Store the main repos file and each repo that is out of date."""
# Repositories
self.content = {}
# Not run concurrently since this is bound by disk I/O
for repository in self.hacs.repositories.list_all:
await self.async_store_repository_data(repository)
await async_save_to_store(self.hacs.hass, "repositories", self.content)
async def async_store_repository_data(self, repository):
repository_manifest = repository.repository_manifest.manifest
data = {
"authors": repository.data.authors,
"category": repository.data.category,
"description": repository.data.description,
"domain": repository.data.domain,
"downloads": repository.data.downloads,
"etag_repository": repository.data.etag_repository,
"full_name": repository.data.full_name,
"first_install": repository.status.first_install,
"installed_commit": repository.data.installed_commit,
"installed": repository.data.installed,
"last_commit": repository.data.last_commit,
"last_release_tag": repository.data.last_version,
"last_updated": repository.data.last_updated,
"name": repository.data.name,
"new": repository.data.new,
"repository_manifest": repository_manifest,
"selected_tag": repository.data.selected_tag,
"show_beta": repository.data.show_beta,
"stars": repository.data.stargazers_count,
"topics": repository.data.topics,
"version_installed": repository.data.installed_version,
}
self.content[str(repository.data.id)] = data
if (
repository.data.installed
and (repository.data.installed_commit or repository.data.installed_version)
and (export := repository.data.export_data())
):
# export_data will return `None` if the memorized
# data is already up to date which allows us to avoid
# writing data that is already up to date or generating
# executor jobs to check the data on disk to see
# if a write is needed.
await async_save_to_store_default_encoder(
self.hacs.hass,
f"hacs/{repository.data.id}.hacs",
export,
)
repository.data.memorize_storage(export)
async def restore(self):
"""Restore saved data."""
hacs = await async_load_from_store(self.hacs.hass, "hacs")
repositories = await async_load_from_store(self.hacs.hass, "repositories") or {}
if not hacs and not repositories:
# Assume new install
self.hacs.status.new = True
return True
self.logger.info("Restore started")
self.hacs.status.new = False
# Hacs
self.hacs.configuration.frontend_mode = hacs.get("view", "Grid")
self.hacs.configuration.frontend_compact = hacs.get("compact", False)
self.hacs.configuration.onboarding_done = hacs.get("onboarding_done", False)
self.hacs.common.archived_repositories = hacs.get("archived_repositories", [])
self.hacs.common.renamed_repositories = {}
        # Clear out double renamed values
renamed = hacs.get("renamed_repositories", {})
for entry in renamed:
value = renamed.get(entry)
if value not in renamed:
self.hacs.common.renamed_repositories[entry] = value
hass = self.hacs.hass
stores = {}
try:
await self.register_unknown_repositories(repositories)
for entry, repo_data in repositories.items():
if entry == "0":
# Ignore repositories with ID 0
self.logger.debug("Found repository with ID %s - %s", entry, repo_data)
continue
if self.async_restore_repository(entry, repo_data):
stores[entry] = get_store_for_key(hass, f"hacs/{entry}.hacs")
def _load_from_storage():
for entry, store in stores.items():
if os.path.exists(store.path) and (data := store.load()):
if (full_name := data.get("full_name")) and (
renamed := self.hacs.common.renamed_repositories.get(full_name)
) is not None:
data["full_name"] = renamed
update_repository_from_storage(
self.hacs.repositories.get_by_id(entry), data
)
await hass.async_add_executor_job(_load_from_storage)
self.logger.info("Restore done")
except (Exception, BaseException) as exception: # pylint: disable=broad-except
self.logger.critical(f"[{exception}] Restore Failed!", exc_info=exception)
return False
return True
async def register_unknown_repositories(self, repositories):
"""Registry any unknown repositories."""
register_tasks = [
register_repository(
full_name=repo_data["full_name"],
category=repo_data["category"],
check=False,
repo_id=entry,
)
for entry, repo_data in repositories.items()
if entry != "0" and not self.hacs.repositories.is_registered(repository_id=entry)
]
if register_tasks:
await asyncio.gather(*register_tasks)
@callback
def async_restore_repository(self, entry, repository_data):
full_name = repository_data["full_name"]
if not (repository := self.hacs.repositories.get_by_full_name(full_name)):
self.logger.error(f"Did not find {full_name} ({entry})")
return False
# Restore repository attributes
self.hacs.async_set_repository_id(repository, entry)
repository.data.authors = repository_data.get("authors", [])
repository.data.description = repository_data.get("description")
repository.releases.last_release_object_downloads = repository_data.get("downloads")
repository.data.last_updated = repository_data.get("last_updated")
repository.data.etag_repository = repository_data.get("etag_repository")
repository.data.topics = repository_data.get("topics", [])
repository.data.domain = repository_data.get("domain", None)
repository.data.stargazers_count = repository_data.get("stars", 0)
repository.releases.last_release = repository_data.get("last_release_tag")
repository.data.hide = repository_data.get("hide", False)
repository.data.installed = repository_data.get("installed", False)
repository.data.new = repository_data.get("new", True)
repository.data.selected_tag = repository_data.get("selected_tag")
repository.data.show_beta = repository_data.get("show_beta", False)
repository.data.last_version = repository_data.get("last_release_tag")
repository.data.last_commit = repository_data.get("last_commit")
repository.data.installed_version = repository_data.get("version_installed")
repository.data.installed_commit = repository_data.get("installed_commit")
repository.repository_manifest = HacsManifest.from_dict(
repository_data.get("repository_manifest", {})
)
if repository.data.installed:
repository.status.first_install = False
if full_name == "hacs/integration":
repository.data.installed_version = self.hacs.version
repository.data.installed = True
return True
| 42.995652 | 93 | 0.639498 |
f75589efd7c0e17b9ac003f80a1178023e231a53 | 4,811 | py | Python | python-client/swagger_client/models/bucket.py | gabisurita/kinto-codegen-tutorial | b5921ec603df031f9ff25683b7a3fbed5af79094 | [
"MIT"
] | 2 | 2017-03-02T13:12:24.000Z | 2017-10-22T10:31:09.000Z | python-client/swagger_client/models/bucket.py | gabisurita/kinto-codegen-tutorial | b5921ec603df031f9ff25683b7a3fbed5af79094 | [
"MIT"
] | null | null | null | python-client/swagger_client/models/bucket.py | gabisurita/kinto-codegen-tutorial | b5921ec603df031f9ff25683b7a3fbed5af79094 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
kinto
Kinto is a minimalist JSON storage service with synchronisation and sharing abilities. It is meant to be easy to use and easy to self-host. **Limitations of this OpenAPI specification:** 1. Validation on OR clauses is not supported (e.g. provide `data` or `permissions` in patch operations). 2. [Filtering](http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html) is supported on any field by using `?{prefix}{field_name}={value}`. 3. [Backoff headers](http://kinto.readthedocs.io/en/stable/api/1.x/backoff.html) may occur with any response, but they are only present if the server is under in heavy load, so we cannot validate them on every request. They are listed only on the default error message. 4. [Collection schemas](http://kinto.readthedocs.io/en/stable/api/1.x/collections.html#collection-json-schema) can be provided when defining a collection, but they are not validated by this specification.
OpenAPI spec version: 1.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Bucket(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, data=None, permissions=None):
"""
Bucket - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'object',
'permissions': 'BucketPermissions'
}
self.attribute_map = {
'data': 'data',
'permissions': 'permissions'
}
self._data = data
self._permissions = permissions
@property
def data(self):
"""
Gets the data of this Bucket.
:return: The data of this Bucket.
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this Bucket.
:param data: The data of this Bucket.
:type: object
"""
self._data = data
@property
def permissions(self):
"""
Gets the permissions of this Bucket.
:return: The permissions of this Bucket.
:rtype: BucketPermissions
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""
Sets the permissions of this Bucket.
:param permissions: The permissions of this Bucket.
:type: BucketPermissions
"""
self._permissions = permissions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
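    # Illustrative usage sketch (editorial addition, not generated by swagger-codegen): the model
    # is a thin data holder, so round-tripping through to_dict() is the usual way to inspect it.
    #
    #     bucket = Bucket(data={"id": "blog"}, permissions=None)
    #     bucket.to_dict()   # {'data': {'id': 'blog'}, 'permissions': None}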
| 31.444444 | 938 | 0.594055 |
f7558ac5ccfb283d83840252a788db94af76e9e6 | 605 | py | Python | c2cwsgiutils/sql_profiler/__init__.py | jhutchings1/c2cwsgiutils | 733f3eace5393539a170455038a27d42682bf4f5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cwsgiutils/sql_profiler/__init__.py | jhutchings1/c2cwsgiutils | 733f3eace5393539a170455038a27d42682bf4f5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cwsgiutils/sql_profiler/__init__.py | jhutchings1/c2cwsgiutils | 733f3eace5393539a170455038a27d42682bf4f5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
A view (URL=/sql_provider) allowing to enable/disable a SQL spy that runs an "EXPLAIN ANALYZE" on
every SELECT query going through SQLAlchemy.
"""
import logging
import pyramid.request
from c2cwsgiutils import auth
ENV_KEY = "C2C_SQL_PROFILER_ENABLED"
CONFIG_KEY = "c2c.sql_profiler_enabled"
LOG = logging.getLogger(__name__)
repository = None
def init(config: pyramid.config.Configurator) -> None:
"""
Install a pyramid event handler that adds the request information
"""
if auth.is_enabled(config, ENV_KEY, CONFIG_KEY):
from . import _impl
_impl.init(config)
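# Editorial note (sketch, based only on the constants above): the profiler view is registered
# only when the auth helper reports the feature as enabled, which presumably means setting
# C2C_SQL_PROFILER_ENABLED in the environment (or c2c.sql_profiler_enabled in the Pyramid
# settings) before init(config) is called, e.g.
#
#     import os
#     os.environ[ENV_KEY] = "1"   # hypothetical way to enable the spy before init(config)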
| 24.2 | 98 | 0.73719 |