Dataset schema (29 columns; for string and sequence columns the min/max values are lengths, and ⌀ marks columns that may contain nulls):

| column | dtype | min | max | nulls |
|---|---|---|---|---|
| hexsha | string | 40 | 40 | |
| size | int64 | 3 | 1.03M | |
| ext | string (10 classes) | | | |
| lang | string (1 class) | | | |
| max_stars_repo_path | string | 3 | 972 | |
| max_stars_repo_name | string | 6 | 130 | |
| max_stars_repo_head_hexsha | string | 40 | 78 | |
| max_stars_repo_licenses | sequence | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | 24 | 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_path | string | 3 | 972 | |
| max_issues_repo_name | string | 6 | 130 | |
| max_issues_repo_head_hexsha | string | 40 | 78 | |
| max_issues_repo_licenses | sequence | 1 | 10 | |
| max_issues_count | int64 | 1 | 116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_path | string | 3 | 972 | |
| max_forks_repo_name | string | 6 | 130 | |
| max_forks_repo_head_hexsha | string | 40 | 78 | |
| max_forks_repo_licenses | sequence | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | 24 | 24 | ⌀ |
| content | string | 3 | 1.03M | |
| avg_line_length | float64 | 1.13 | 941k | |
| max_line_length | int64 | 2 | 941k | |
| alphanum_fraction | float64 | 0 | 1 | |
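# A minimal, self-contained sketch (plain Python) of how one record of this
# table maps onto the columns above. The field names are the real column
# names; the values below are hypothetical placeholders, not data taken from
# the table itself.
example_row = {
    "hexsha": "0" * 40,                       # unique id of the file contents
    "size": 2110,                             # file size in bytes
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "docs/source/conf.py",
    "max_stars_repo_licenses": ["Apache-2.0"],
    "max_stars_count": None,                  # ⌀ columns may be null
    "content": "# file text goes here",
    "avg_line_length": 32.5,
    "max_line_length": 79,
    "alphanum_fraction": 0.66,
}
print(example_row["max_stars_repo_path"], len(example_row["content"]))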
8fe41e532494a01d940f2785a24f1f0333f1aa5e | 2,110 | py | Python | docs/source/conf.py | JeremieHornus/ufoLib2 | 084dac404c2e84d0945e26ebb93eb699b260e743 | ["Apache-2.0"] | null | null | null | docs/source/conf.py | JeremieHornus/ufoLib2 | 084dac404c2e84d0945e26ebb93eb699b260e743 | ["Apache-2.0"] | null | null | null | docs/source/conf.py | JeremieHornus/ufoLib2 | 084dac404c2e84d0945e26ebb93eb699b260e743 | ["Apache-2.0"] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../src"))
# -- Project information -----------------------------------------------------
project = "ufoLib2"
copyright = "2020, The FontTools Authors"
author = "The FontTools Authors"
# -- General configuration ---------------------------------------------------
master_doc = "index"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# exclude_patterns = []
intersphinx_mapping = {
"fontTools": ("https://fonttools.readthedocs.io/en/latest/", None),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 32.461538 | 79 | 0.663033 |
97a34141ddb6da281c5baca69b4da056e8c6bb95 | 433 | py | Python | python/project_kine/temp.py | OthmanEmpire/misc_code | 4d487db518167b62969ad65abaffbc9e01777d91 | ["MIT"] | null | null | null | python/project_kine/temp.py | OthmanEmpire/misc_code | 4d487db518167b62969ad65abaffbc9e01777d91 | ["MIT"] | null | null | null | python/project_kine/temp.py | OthmanEmpire/misc_code | 4d487db518167b62969ad65abaffbc9e01777d91 | ["MIT"] | null | null | null |
__author__ = 'Ozkh'
def get_text(name):
return "lorem ipsum, {0} dolor sit amet".format(name)
def p_decorate(func):
def func_wrapper(name):
return "<p>{0}</p>".format(func(name))
return func_wrapper
my_get_text = p_decorate(get_text)
print(my_get_text("John"))
# <p>Outputs lorem ipsum, John dolor sit amet</p>
print(id(get_text))
get_text = p_decorate(get_text)
print(id(get_text))
print(get_text("John"))
| 17.32 | 56 | 0.69746 |
6967414250bd3a4e9e9317ecf1cdb87ee0801abf | 2,207 | py | Python | mortgage_calculator.py | IanMadlenya/investable | 2e0101158c7331119f62a3e07cff8349ea52b0e0 | ["MIT"] | 18 | 2017-02-08T20:29:08.000Z | 2021-08-07T20:14:35.000Z | mortgage_calculator.py | IanMadlenya/investable | 2e0101158c7331119f62a3e07cff8349ea52b0e0 | ["MIT"] | 1 | 2021-08-11T19:12:18.000Z | 2021-08-11T19:12:18.000Z | mortgage_calculator.py | jttyeung/investable | 2e0101158c7331119f62a3e07cff8349ea52b0e0 | ["MIT"] | 10 | 2017-02-22T02:55:47.000Z | 2022-03-14T06:58:42.000Z |
import re
def calculate_mortgage(mortgage_details):
"""
Calculates mortgage monthly payment rate.
Tests:
>>> calculate_mortgage({'price': '$1,000,000', 'rate': '5.25', 'downpayment': '200000', 'loan': '30'})
('$4,418', '$1,590,347')
>>> calculate_mortgage({'price': '$650,000', 'rate': '3.83', 'downpayment': '169000', 'loan': '20'})
('$2,872', '$689,246')
>>> calculate_mortgage({'price': '$240,000', 'rate': '1.12', 'downpayment': '240000', 'loan': '15'})
('$0', '$0')
"""
MONTHS_IN_YEAR = 12
PERCENT_CONVERSION = 100
    # Get price, mortgage rate and downpayment amount; strip "$" and "," before
    # converting to numbers so inputs like '$1,000,000' parse correctly.
    price = int(re.sub('[^\d.]+', '', str(mortgage_details['price'])))
    rate = ((float(mortgage_details['rate'])/PERCENT_CONVERSION)/MONTHS_IN_YEAR)
    downpayment = int(re.sub('[^\d.]+', '', mortgage_details['downpayment']))
    # If HOA exists, turn it into an integer, otherwise it is zero
    try:
        hoa = int(mortgage_details.get('hoa', 0))
    except (TypeError, ValueError):
        hoa = 0
# Translate loan term in years to months
loan = mortgage_details['loan']
# Total loan payments
loan_payments = int(loan[0:2]) * MONTHS_IN_YEAR
# Calculate monthly payment
if rate == 0:
monthly_payment = float(price)/loan_payments
else:
monthly_payment = (price - downpayment) * (rate * (1 + rate) ** loan_payments) / ((1 + rate) ** loan_payments - 1)
# Calculate total monthly payment with HOA fees if one exists
monthly_plus_hoa_payment = monthly_payment + hoa
formatted_monthly_plus_hoa_payment = '${:,}'.format(int(round(monthly_plus_hoa_payment)))
# Calculate total interest paid in span of loan
total_interest_paid = monthly_payment * loan_payments - price
formatted_total_interest_paid = '${:,}'.format(int(round(monthly_payment * loan_payments - price)))
# Calculate total HOA fees paid in span of loan
total_hoa_paid = hoa * loan_payments
# Calculate the total mortgage paid with interest
total_mortgage_payment = '${:,}'.format(int(round(price + total_interest_paid + total_hoa_paid)))
return (formatted_monthly_plus_hoa_payment, total_mortgage_payment)
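# A minimal usage sketch (not part of the original module): the dict keys
# below mirror the ones consumed by calculate_mortgage above, with made-up
# example values.
if __name__ == '__main__':
    monthly, total = calculate_mortgage({
        'price': '500000',
        'rate': '4.5',
        'downpayment': '100000',
        'loan': '30',
        'hoa': 0,
    })
    print('Monthly payment:', monthly)
    print('Total paid over the loan:', total)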
| 36.783333 | 122 | 0.654735 |
c2b71509c1e46f082585b6cea0a0528f3c2f5b9b | 30 | py | Python | web3_multicall/_utils/__init__.py | BrunoMazorra/web3_multicall_blocknumber | 2f12f6b6bb9853b10db90b968f5b0b75a9b1a7b4 | ["MIT"] | 1 | 2021-12-15T04:07:25.000Z | 2021-12-15T04:07:25.000Z | web3_multicall/_utils/__init__.py | BrunoMazorra/web3_multicall_blocknumber | 2f12f6b6bb9853b10db90b968f5b0b75a9b1a7b4 | ["MIT"] | null | null | null | web3_multicall/_utils/__init__.py | BrunoMazorra/web3_multicall_blocknumber | 2f12f6b6bb9853b10db90b968f5b0b75a9b1a7b4 | ["MIT"] | 3 | 2021-12-15T04:07:45.000Z | 2022-03-04T03:35:28.000Z |
from .function import Function
| 30 | 30 | 0.866667 |
5cbd69f7d158fd9b9882b43ff96a7fa08ec90c95 | 148 | py | Python | py_placeroute.py | kiba09/unicostfortraveling | ce49ea3fca82d2013f47a2735e1c317526b67195 | ["Apache-2.0"] | null | null | null | py_placeroute.py | kiba09/unicostfortraveling | ce49ea3fca82d2013f47a2735e1c317526b67195 | ["Apache-2.0"] | null | null | null | py_placeroute.py | kiba09/unicostfortraveling | ce49ea3fca82d2013f47a2735e1c317526b67195 | ["Apache-2.0"] | null | null | null |
import googlemaps as GoogleMaps
from pygeocoder import Geocoder
results = Geocoder.geocode("Tian'anmen,Beijing")
print(results[0].coordinates)
| 14.8 | 48 | 0.797297 |
9594d65ded58f5cb4edc3f262f7ba340cb6c2d6a | 8,793 | py | Python | BND-DDQN/GazeboWorld.py | KerryWu16/BND-DDQN | 30bc2bf7a29415c453746fe472ac2d558c481197 | ["MIT"] | 6 | 2019-07-18T14:22:23.000Z | 2022-03-06T09:42:18.000Z | BND-DDQN/GazeboWorld.py | KerryWu16/BND-DDQN | 30bc2bf7a29415c453746fe472ac2d558c481197 | ["MIT"] | 1 | 2020-01-18T07:47:50.000Z | 2020-02-11T02:33:51.000Z | BND-DDQN/GazeboWorld.py | KerryWu16/BND-DDQN | 30bc2bf7a29415c453746fe472ac2d558c481197 | ["MIT"] | 2 | 2019-07-18T14:22:33.000Z | 2022-01-18T07:41:22.000Z |
import rospy
import numpy as np
import cv2
import copy
import tf
from geometry_msgs.msg import Twist
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.msg import ModelState
from gazebo_msgs.msg import ContactsState
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from nav_msgs.msg import Odometry
from preprocessor import HistoryPreprocessor
class GazeboWorld():
def __init__(self, ns='',
start_location=(0,0),
max_episode=500,
window_size=4,
input_shape=(80,100)):
rospy.init_node('GazeboWorld', anonymous=False)
#-----------Parameters-----------------------
self.set_self_state = ModelState()
self.set_self_state.model_name = ns + 'mobile_base'
self.set_self_state.pose.position.x = start_location[0]
self.set_self_state.pose.position.y = start_location[1]
self.set_self_state.pose.position.z = 0.
self.set_self_state.pose.orientation.x = 0.0
self.set_self_state.pose.orientation.y = 0.0
self.set_self_state.pose.orientation.z = 0.0
self.set_self_state.pose.orientation.w = 1.0
self.set_self_state.twist.linear.x = 0.
self.set_self_state.twist.linear.y = 0.
self.set_self_state.twist.linear.z = 0.
self.set_self_state.twist.angular.x = 0.
self.set_self_state.twist.angular.y = 0.
self.set_self_state.twist.angular.z = 0.
self.set_self_state.reference_frame = 'world'
self.input_shape = input_shape
self.bridge = CvBridge()
self.object_state = [0, 0, 0, 0]
self.object_name = []
self.action1_table = [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
self.action2_table = [np.pi*45/180, np.pi*30/180, np.pi*15/180, 0., -np.pi*15/180, -np.pi*30/180, -np.pi*45/180]
self.self_speed = [0.7, 0.0]
self.default_states = None
self.start_table = [(0, 0)]
self.depth_image = None
self.bump = False
self.time = 0
self.max_episode = max_episode
self.preprocessor = HistoryPreprocessor(self.input_shape, history_length=window_size)
self.window_size = window_size
self.state = {
'old_state': np.zeros(shape=(input_shape[0], input_shape[1], window_size)),
'action1': 0,
'action2': 0,
'reward': 0,
'new_state': np.zeros(shape=(input_shape[0], input_shape[1], window_size)),
'is_terminal': False
}
#-----------Publisher and Subscriber-------------
self.cmd_vel = rospy.Publisher(ns + 'cmd_vel', Twist, queue_size = 1)
self.set_state = rospy.Publisher('gazebo/set_model_state', ModelState, queue_size = 1)
self.resized_depth_img = rospy.Publisher(ns + '/camera/depth/image_resized',Image, queue_size = 1)
self.object_state_sub = rospy.Subscriber('gazebo/model_states', ModelStates, self.ModelStateCallBack)
self.depth_image_sub = rospy.Subscriber(ns + '/camera/depth/image_raw', Image, self.DepthImageCallBack)
self.odom_sub = rospy.Subscriber(ns + '/odom', Odometry, self.OdometryCallBack)
self.bumper_sub = rospy.Subscriber('bumper', ContactsState, self.BumperCallBack, queue_size = 1)
rospy.sleep(2.)
rospy.on_shutdown(self.shutdown)
def ModelStateCallBack(self, data):
# self state
idx = data.name.index(self.set_self_state.model_name)
quaternion = (data.pose[idx].orientation.x,
data.pose[idx].orientation.y,
data.pose[idx].orientation.z,
data.pose[idx].orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
self.self_state = [data.pose[idx].position.x,
data.pose[idx].position.y,
yaw,
data.twist[idx].linear.x,
data.twist[idx].linear.y,
data.twist[idx].angular.z]
for lp in range(len(self.object_name)):
idx = data.name.index(self.object_name[lp])
quaternion = (data.pose[idx].orientation.x,
data.pose[idx].orientation.y,
data.pose[idx].orientation.z,
data.pose[idx].orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
self.object_state[lp] = [data.pose[idx].position.x,
data.pose[idx].position.y,
yaw]
if self.default_states is None:
self.default_states = copy.deepcopy(data)
def DepthImageCallBack(self, img):
self.depth_image = img
def OdometryCallBack(self, odometry):
self.self_linear_x_speed = odometry.twist.twist.linear.x
self.self_linear_y_speed = odometry.twist.twist.linear.y
self.self_rotation_z_speed = odometry.twist.twist.angular.z
def BumperCallBack(self, bumper_data):
bump = False
for state in bumper_data.states:
if 'ground_plane' not in state.collision2_name:
bump = True
break
self.bump = bump
def GetDepthImageObservation(self):
# ros image to cv2 image
try:
cv_img = self.bridge.imgmsg_to_cv2(self.depth_image) #32FC1
except Exception as e:
raise e
cv_img = np.array(cv_img, dtype=np.float32)
# resize
dim = (self.input_shape[1], self.input_shape[0])
cv_img = cv2.resize(cv_img, dim, interpolation = cv2.INTER_NEAREST) #INTER_AREA
cv_img[np.isnan(cv_img)] = 0.
# normalize
return(cv_img/5.)
def PublishDepthPrediction(self, depth_img):
# cv2 image to ros image and publish
cv_img = np.array(depth_img, dtype=np.float32)
try:
resized_img = self.bridge.cv2_to_imgmsg(cv_img, "passthrough")
except Exception as e:
raise e
self.resized_depth_img.publish(resized_img)
def GetSelfState(self):
return self.self_state
def GetSelfLinearXSpeed(self):
return self.self_linear_x_speed
def GetSelfOdomeSpeed(self):
v = np.sqrt(self.self_linear_x_speed**2 + self.self_linear_y_speed**2)
return [v, self.self_rotation_z_speed]
def GetSelfSpeed(self):
return np.array(self.self_speed)
def GetBump(self):
return self.bump
def SetRobotPose(self):
quaternion = tf.transformations.quaternion_from_euler(0., 0., np.random.uniform(-np.pi, np.pi))
start_location = self.start_table[np.random.randint(0, len(self.start_table))]
object_state = copy.deepcopy(self.set_self_state)
object_state.pose.orientation.x = quaternion[0]
object_state.pose.orientation.y = quaternion[1]
object_state.pose.orientation.z = quaternion[2]
object_state.pose.orientation.w = quaternion[3]
object_state.pose.position.x = start_location[0]
object_state.pose.position.y = start_location[1]
self.set_state.publish(object_state)
rospy.sleep(0.1)
def SetObjectPose(self):
object_state = ModelState()
state = copy.deepcopy(self.default_states)
for i in range(len(self.default_states.name)):
if 'mobile_base' not in state.name[i]:
object_state.model_name = state.name[i]
object_state.pose = state.pose[i]
object_state.twist = state.twist[i]
object_state.reference_frame = 'world'
self.set_state.publish(object_state)
rospy.sleep(0.1)
def ResetWorld(self):
self.SetRobotPose() # reset robot
self.SetObjectPose() # reset environment
rospy.sleep(0.1)
def Control(self, action1, action2):
self.self_speed[0] = self.action1_table[int(action1)]
self.self_speed[1] = self.action2_table[int(action2)]
move_cmd = Twist()
move_cmd.linear.x = self.self_speed[0]
move_cmd.linear.y = 0.
move_cmd.linear.z = 0.
move_cmd.angular.x = 0.
move_cmd.angular.y = 0.
move_cmd.angular.z = self.self_speed[1]
self.cmd_vel.publish(move_cmd)
def shutdown(self):
rospy.loginfo("Stop Moving")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
def GetRewardAndTerminate(self):
terminate = False
reset = False
[v, theta] = self.GetSelfOdomeSpeed()
reward = 2*v * v * np.cos( 2* v* theta) -0.1
if self.GetBump():
reward = -10.
terminate = True
reset = True
if self.time > self.max_episode:
reset = True
return reward, terminate, reset
def GetState(self):
return np.copy(self.state['old_state']), self.state['action1'], self.state['action2'], self.state['reward'], \
np.copy(self.state['new_state']), self.state['is_terminal']
def TakeAction(self, action1, action2):
old_state = self.preprocessor.get_state()
self.time += 1
self.Control(action1, action2)
rospy.sleep(0.1)
state = self.GetDepthImageObservation()
reward, is_terminal, reset = self.GetRewardAndTerminate()
self.preprocessor.process_state_for_memory(state)
new_state = self.preprocessor.get_state()
self.state['old_state'] = old_state
self.state['action1'] = action1
self.state['action2'] = action2
self.state['reward'] = reward
self.state['new_state'] = new_state
self.state['is_terminal'] = is_terminal
if reset:
self.Reset()
def Reset(self):
move_cmd = Twist()
move_cmd.linear.x = 0.
move_cmd.linear.y = 0.
move_cmd.linear.z = 0.
move_cmd.angular.x = 0.
move_cmd.angular.y = 0.
move_cmd.angular.z = 0.
self.cmd_vel.publish(move_cmd)
self.ResetWorld()
self.preprocessor.reset()
self.time = 0
state = self.GetDepthImageObservation()
for _ in range(self.window_size):
self.preprocessor.process_state_for_memory(state)
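# A standalone illustration (not part of the original class) of the shaped
# reward used in GetRewardAndTerminate above: reward = 2*v*v*cos(2*v*theta) - 0.1,
# so fast, straight motion earns the most reward while fast turning is
# penalised. numpy is already imported above as np.
if __name__ == '__main__':
    for v, theta in [(0.7, 0.0), (0.7, np.pi * 45 / 180), (0.1, 0.0)]:
        r = 2 * v * v * np.cos(2 * v * theta) - 0.1
        print('v=%.1f, theta=%.2f -> reward=%.3f' % (v, theta, r))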
| 32.327206 | 114 | 0.717844 |
47c2995d0a65ef5dbf9948db78ab9709b678b10a | 4,246 | py | Python | main.py | Saeko22/Discord-Bot | bc2ddfbde978441383af09a6e5b06d3ef649b477 | ["Unlicense"] | null | null | null | main.py | Saeko22/Discord-Bot | bc2ddfbde978441383af09a6e5b06d3ef649b477 | ["Unlicense"] | null | null | null | main.py | Saeko22/Discord-Bot | bc2ddfbde978441383af09a6e5b06d3ef649b477 | ["Unlicense"] | null | null | null |
import discord
from discord.ext import commands
import pytz
from datetime import datetime
bot = commands.Bot(intents=discord.Intents.all(), command_prefix='&')
bot.remove_command('help')
@bot.event
async def on_ready():
await bot.change_presence(status=discord.Status.online, activity=discord.Activity(
type=discord.ActivityType.playing, name="&help"
))
print(f'BotId: {bot.user.id} - Name: {bot.user.name}')
@bot.command()
async def ping(ctx):
await ctx.send(f":ping_pong: Pong! `{round(bot.latency*1000)}ms`")
@bot.command()
@commands.has_permissions(ban_members=True)
async def ban(ctx, member: discord.Member, *, reason=None):
await member.ban(reason=reason)
await ctx.send(f"{member} was banned!")
@bot.command()
@commands.has_permissions(kick_members=True)
async def kick(ctx, member: discord.Member, *, reason=None):
await member.kick(reason=reason)
await ctx.send(f"{member} was kicked!")
@bot.command(name='test')
async def test(ctx, *args):
await ctx.send(f'Eingabe: {" ".join(args)}')
@commands.has_permissions(manage_messages=True) # Set the permission: who is allowed to use this command?
@bot.command()
async def clear(ctx, count=1): # &clear on its own deletes only one message!
messages = await ctx.channel.purge(limit=count+1)
await ctx.send(f'Es wurden {len(messages)-1} Nachrichten gelöscht.', delete_after=4)
@bot.command()
async def help(ctx):
embed = discord.Embed(
title='Bot Commands',
description='Welcome to the help section. Here are all the commands for this game!',
color=discord.Colour.purple()
)
embed.set_thumbnail(url='https://discord.gg/47yAdzbwcp')
embed.add_field(
name='&help',
value='list of all commands',
inline=False
)
embed.set_thumbnail(url='https://avatars.githubusercontent.com/u/86261346?v=4')
embed.add_field(
name='&clear',
value='clear messages',
inline=False
)
embed.set_thumbnail(url='https://avatars.githubusercontent.com/u/86261346?v=4')
embed.add_field(
name='&userinfo',
value='is a userinfo from the user',
inline=False
)
embed.set_thumbnail(url='https://avatars.githubusercontent.com/u/86261346?v=4')
embed.add_field(
name='&Test',
value='is a test',
inline=False
)
embed.set_thumbnail(url='https://avatars.githubusercontent.com/u/86261346?v=4')
embed.add_field(
name='&ping',
value='The bot make a pong back',
inline=False
)
embed.set_thumbnail(url='https://avatars.githubusercontent.com/u/86261346?v=4')
embed.add_field(
name='&Kick',
value='The bot kicks user',
inline=False
)
embed.set_thumbnail(url='https://avatars.githubusercontent.com/u/86261346?v=4')
embed.add_field(
name='&Ban',
value='The bot ban user',
inline=False
)
embed.set_footer(text=f'Angefordert von {ctx.author.name} • {ctx.author.id}')
await ctx.send(embed=embed)
@bot.command(name='userinfo')
async def userinfo(ctx, member: discord.Member):
de = pytz.timezone('Europe/Berlin')
embed = discord.Embed(title=f'> Userinfo für {member.display_name}',
description='', color=0x4cd137, timestamp=datetime.now().astimezone(tz=de))
embed.add_field(name='Name', value=f'```{member.name}#{member.discriminator}```', inline=True)
embed.add_field(name='Bot', value=f'```{("Ja" if member.bot else "Nein")}```', inline=True)
embed.add_field(name='Nickname', value=f'```{(member.nick if member.nick else "Nicht gesetzt")}```', inline=True)
embed.add_field(name='Server beigetreten', value=f'```{member.joined_at}```', inline=True)
embed.add_field(name='Discord beigetreten', value=f'```{member.created_at}```', inline=True)
embed.add_field(name='Rollen', value=f'```{len(member.roles)}```', inline=True)
embed.add_field(name='Höchste Rolle', value=f'```{member.top_role.name}```', inline=True)
embed.add_field(name='Farbe', value=f'```{member.color}```', inline=True)
embed.add_field(name='Booster', value=f'```{("Ja" if member.premium_since else "Nein")}```', inline=True)
embed.set_footer(text=f'Angefordert von {ctx.author.name} • {ctx.author.id}', icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
bot.run('BOTTOKEN')
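# A safer variant (sketch, not from the original repository): read the token
# from an environment variable instead of hard-coding the 'BOTTOKEN'
# placeholder above. DISCORD_BOT_TOKEN is a variable name chosen here.
#
#   import os
#   bot.run(os.environ["DISCORD_BOT_TOKEN"])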
| 31.686567 | 117 | 0.689119 |
b18b781c389ea140233f507cf22012762ca15c5f | 437 | py | Python | PyWorkSpace/helloworld/venv/Scripts/easy_install-script.py | FTTL/GitWorkSpace | 86c38f792ad8743179716cf9ef86e02f15143ab0 | ["MIT"] | null | null | null | PyWorkSpace/helloworld/venv/Scripts/easy_install-script.py | FTTL/GitWorkSpace | 86c38f792ad8743179716cf9ef86e02f15143ab0 | ["MIT"] | 1 | 2021-01-05T07:53:12.000Z | 2021-01-05T07:53:12.000Z | PyWorkSpace/helloworld/venv/Scripts/easy_install-script.py | FTTL/GitWorkSpace | 86c38f792ad8743179716cf9ef86e02f15143ab0 | ["MIT"] | null | null | null |
#!E:\PyWorkSpace\helloworld\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| 33.615385 | 83 | 0.693364 |
d9bce8f172fe787bb92d9d0b4ef9323ef0e0e4ef | 36 | py | Python | pylearn2/sandbox/rnn/utils/__init__.py | BouchardLab/pylearn2 | 4cab785b870d22cd9e85a5f536d4cac234b6bf60 | ["BSD-3-Clause"] | 2,045 | 2015-01-01T14:07:52.000Z | 2022-03-08T08:56:41.000Z | pylearn2/sandbox/rnn/utils/__init__.py | BouchardLab/pylearn2 | 4cab785b870d22cd9e85a5f536d4cac234b6bf60 | ["BSD-3-Clause"] | 305 | 2015-01-02T13:18:24.000Z | 2021-08-20T18:03:28.000Z | pylearn2/sandbox/rnn/utils/__init__.py | BouchardLab/pylearn2 | 4cab785b870d22cd9e85a5f536d4cac234b6bf60 | ["BSD-3-Clause"] | 976 | 2015-01-01T17:08:51.000Z | 2022-03-25T19:53:17.000Z |
"""
Utilities for RNN framework
"""
| 9 | 27 | 0.666667 |
5a14b48d67578053cb00ebd113feb48f1d93caed | 520 | py | Python | mtdnn/common/linear_pooler.py | microsoft/mt-dnn | e5c3e07f3a8e55067433714ce261a6d28ba73d22 | ["MIT"] | 113 | 2020-05-08T08:02:51.000Z | 2022-03-27T06:43:56.000Z | mtdnn/common/linear_pooler.py | microsoft/mt-dnn | e5c3e07f3a8e55067433714ce261a6d28ba73d22 | ["MIT"] | 4 | 2020-06-03T12:00:10.000Z | 2021-03-15T07:36:44.000Z | mtdnn/common/linear_pooler.py | microsoft/mt-dnn | e5c3e07f3a8e55067433714ce261a6d28ba73d22 | ["MIT"] | 24 | 2020-05-11T13:13:22.000Z | 2022-03-25T05:49:51.000Z |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
from torch import nn
class LinearPooler(nn.Module):
def __init__(self, hidden_size):
super(LinearPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
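# A minimal usage sketch (assumes only PyTorch; the shapes below are made up):
# the pooler takes the hidden state of the first token of each sequence and
# projects it through Linear + Tanh.
if __name__ == '__main__':
    import torch
    pooler = LinearPooler(hidden_size=8)
    hidden_states = torch.randn(2, 5, 8)   # (batch, sequence length, hidden size)
    print(pooler(hidden_states).shape)     # -> torch.Size([2, 8])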
| 28.888889 | 56 | 0.692308 |
ce3dabbb0207d9d87e2f4219c43d583ea2c294a6 | 2,421 | py | Python | src/csnVisualStudio2008.py | xplanes/CSnake | 3cd7a2f5c23787262c42fe3c2763201afa1bdef4 | ["BSD-4-Clause"] | 4 | 2016-02-16T06:45:24.000Z | 2021-08-09T14:59:12.000Z | src/csnVisualStudio2008.py | xplanes/CSnake | 3cd7a2f5c23787262c42fe3c2763201afa1bdef4 | ["BSD-4-Clause"] | 5 | 2015-08-04T14:42:35.000Z | 2016-03-18T09:08:01.000Z | src/csnVisualStudio2008.py | xplanes/CSnake | 3cd7a2f5c23787262c42fe3c2763201afa1bdef4 | ["BSD-4-Clause"] | 5 | 2015-10-15T10:12:52.000Z | 2021-11-08T15:20:46.000Z |
## @package csnVisualStudio2008
# Definition of the csnVisualStudio2008 compilers.
# \ingroup compiler
import csnCompiler
import os
class Compiler(csnCompiler.Compiler):
""" Abstract Visual Studio 2008 compiler. """
def __init__(self):
csnCompiler.Compiler.__init__(self)
self.postProcessor = PostProcessor()
def GetCompileFlags(self):
return [""]
def IsForPlatform(self, _WIN32, _NOT_WIN32):
return _WIN32 or (not _WIN32 and not _NOT_WIN32)
def GetOutputSubFolder(self, _configuration = "${CMAKE_CFG_INTDIR}"):
"""
Returns the folder where the compiler should place binaries for _configuration.
The default value for _configuration returns the output folder for the current configuration.
for storing binaries.
"""
if _configuration == "DebugAndRelease":
return "bin"
else:
return "bin/%s" % (_configuration)
def GetBuildSubFolder(self, _projectType, _projectName):
return "%s/%s" % (_projectType, _projectName)
def GetThirdPartySubFolder(self):
return ""
def GetThirdPartyCMakeParameters(self):
return []
def GetProjectCMakeParameters(self):
return []
def GetAllowedConfigurations(self):
return ["DebugAndRelease"]
def GetPostProcessor(self):
return self.postProcessor
def TargetIsMac(self):
return False
def TargetIsLinux(self):
return False
class Compiler32(Compiler):
""" Visual Studio 2008 32bits compiler. """
def GetName(self):
return "Visual Studio 9 2008"
def TargetIs32Bits(self):
return True
def TargetIs64Bits(self):
return False
class Compiler64(Compiler):
""" Visual Studio 2008 64bits compiler. """
def GetName(self):
return "Visual Studio 9 2008 Win64"
def TargetIs32Bits(self):
return False
def TargetIs64Bits(self):
return True
class PostProcessor:
def Do(self, _project):
"""
Post processes the vcproj file generated for _project.
"""
# vc proj to patch
if not _project.dependenciesManager.isTopLevel:
slnFilename = "%s/%s.sln" % (_project.GetBuildFolder(), _project.name)
if os.path.exists(slnFilename):
os.remove(slnFilename)
| 27.827586 | 101 | 0.633622 |
59c63f58a2da87bb0b0b0d15e7addf7e1eb18c75 | 6,877 | py | Python | shoptimizer_api/optimizers_builtin/condition_optimizer.py | alex-berish/shoptimizer | 3d8837352c0ae52dee2ac804750866a2b93809f1 | ["Apache-2.0"] | 27 | 2020-08-21T05:59:29.000Z | 2022-03-30T17:26:44.000Z | shoptimizer_api/optimizers_builtin/condition_optimizer.py | alex-berish/shoptimizer | 3d8837352c0ae52dee2ac804750866a2b93809f1 | ["Apache-2.0"] | null | null | null | shoptimizer_api/optimizers_builtin/condition_optimizer.py | alex-berish/shoptimizer | 3d8837352c0ae52dee2ac804750866a2b93809f1 | ["Apache-2.0"] | 20 | 2020-09-14T08:38:11.000Z | 2022-03-13T22:37:40.000Z |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for Shoptimizer API that fixes invalid condition values.
Reference: https://support.google.com/merchants/answer/6324469
If the condition field is specified as "new", but other fields in the
product imply that the condition is otherwise, this optimizer will reset
the condition value to "used".
"""
import logging
from typing import Any, Dict, List, Optional, Set
from flask import current_app
from models import optimization_result_counts
from optimizers_abstract import base_optimizer
from util import gpc_id_to_string_converter
from util import optimization_util
_GPC_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME: str = 'gpc_string_to_id_mapping_{}'
_NEW = 'new'
_USED = 'used'
class ConditionOptimizer(base_optimizer.BaseOptimizer):
"""An optimizer that fixes invalidly-set condition fields."""
_OPTIMIZER_PARAMETER = 'condition-optimizer'
_condition_config = None
_gpc_id_to_string_converter: Optional[
gpc_id_to_string_converter.GPCConverter] = None
def _optimize(
self, product_batch: Dict[str, Any], language: str, country: str,
currency: str) -> optimization_result_counts.OptimizationResultCounts:
"""Runs the optimization.
Fixes invalid condition values.
See above for the definition of an invalid condition value.
Args:
product_batch: A batch of product data.
language: The language to use for this optimizer.
country: The country to use for this optimizer.
currency: The currency to use for this optimizer.
Returns:
The number of products affected by this optimization.
"""
num_of_products_optimized = 0
num_of_products_excluded = 0
self._condition_config = current_app.config.get('CONFIGS', {}).get(
f'condition_optimizer_config_{language}', {})
self._gpc_id_to_string_converter = gpc_id_to_string_converter.GPCConverter(
_GPC_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME.format(language))
for entry in product_batch['entries']:
if (optimization_util.optimization_exclusion_specified(
entry, self._OPTIMIZER_PARAMETER)):
num_of_products_excluded += 1
continue
product = entry['product']
google_product_category = product.get('googleProductCategory', '')
gpc_string = self._gpc_id_to_string_converter.convert_gpc_id_to_string(
google_product_category)
if self._is_google_product_category_excluded(gpc_string):
logging.info(
'Product ID: %s With Category %s was flagged for exclusion '
' of the condition check', product.get('offerId', ''), gpc_string)
continue
used_tokens = set(
token.lower() for token in self._condition_config['used_tokens'])
if product.get('condition', '') == _NEW:
# Category format must follow the official spec to be converted a list.
# Ref: https://support.google.com/merchants/answer/6324436?hl=en.
product_categories = gpc_string.split(' > ')
if isinstance(product_categories, list) and product_categories:
lowest_level_category = product_categories[-1]
category_specific_tokens = self._get_tokens_for_category(
lowest_level_category)
if category_specific_tokens:
category_specific_tokens = set(
token.lower() for token in category_specific_tokens)
used_tokens.update(category_specific_tokens)
# Search for used tokens in both title and description and reset the
# condition to used if any were detected.
product_title = product.get('title', '')
product_description = product.get('description', '')
if self._field_contains_used_tokens(
product_title, used_tokens) or self._field_contains_used_tokens(
product_description, used_tokens):
product['condition'] = _USED
logging.info('Modified item %s: Setting new product to used.',
product.get('offerId', ''))
num_of_products_optimized += 1
base_optimizer.set_optimization_tracking(product,
base_optimizer.SANITIZED)
return optimization_result_counts.OptimizationResultCounts(
num_of_products_optimized, num_of_products_excluded)
def _is_google_product_category_excluded(
self, google_product_category: str) -> bool:
"""Checks if the provided category was found in the exclusions config dict.
Args:
google_product_category: A string representing the product category.
Returns:
True if the given category was found in the condition config's list of
categories to exclude from being optimized for condition due to those
categories being at higher risk of containing false-positives.
"""
excluded_categories = self._condition_config.get(
'excluded_product_categories', [])
# Ensure that the exclude category from the config matches the product's
# category from the beginning of the string in order to support an entire
# category family being matched, as well as enforcing avoidance of unrelated
# matches if only a sub-category was specified.
return any(
google_product_category.startswith(category_to_exclude)
for category_to_exclude in excluded_categories)
def _field_contains_used_tokens(self, field_text: str,
used_tokens: Set[str]) -> bool:
"""Checks if the provided field contains any terms in the given set.
Args:
field_text: A string representing the value of a product field.
used_tokens: A set representing used condition indicators.
Returns:
True if any term was found in the target product field, otherwise False.
"""
return any(token in field_text.lower() for token in used_tokens)
def _get_tokens_for_category(self, product_category: str) -> List[str]:
"""Gets the values in a list of dictionaries if the provided category was found.
Args:
product_category: The product's lowest-level category.
Returns:
A list of the tokens of the matching category, or an empty list.
"""
category_mappings = self._condition_config['target_product_categories']
return category_mappings.get(product_category, [])
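# A self-contained illustration (plain Python, not the Shoptimizer API) of the
# used-token check performed above: if any configured "used" indicator appears
# in the title or description of a product whose condition is "new", the
# condition is reset to "used". The tokens and product below are made up.
if __name__ == '__main__':
  used_tokens = {'used', 'refurbished', 'pre-owned'}
  product = {'condition': 'new',
             'title': 'Refurbished ACME Blender',
             'description': 'Fully tested.'}
  text_fields = (product['title'], product['description'])
  if product['condition'] == 'new' and any(
      token in field.lower() for field in text_fields for token in used_tokens):
    product['condition'] = 'used'
  print(product['condition'])  # -> used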
| 40.692308 | 84 | 0.717464 |
32201cb8ace9921ff839504714ede379b3dd9c20 | 1,130 | py | Python | bika/lims/browser/analysisprofile.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | ["MIT"] | null | null | null | bika/lims/browser/analysisprofile.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | ["MIT"] | null | null | null | bika/lims/browser/analysisprofile.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | ["MIT"] | null | null | null |
from bika.lims.jsonapi import load_field_values
from bika.lims.interfaces import IJSONReadExtender, IAnalysisProfile
from zope.component import adapts
from zope.interface import implements
class JSONReadExtender(object):
"""- Place additional information about profile services
into the returned records.
Used in AR Add to prevent extra requests
"""
implements(IJSONReadExtender)
adapts(IAnalysisProfile)
def __init__(self, context):
self.context = context
def __call__(self, request, data):
service_data = []
for service in self.context.getService():
this_service = {'UID': service.UID(),
'Title': service.Title(),
'Keyword': service.getKeyword(),
'Price': service.getPrice(),
'VAT': service.getVAT(),
'PointOfCapture': service.getPointOfCapture(),
'CategoryTitle': service.getCategory().Title()}
service_data.append(this_service)
data['service_data'] = service_data
| 35.3125 | 75 | 0.607965 |
9058e8f2eaed6e030aaadb0d42703b92bcf39c83 | 10,473 | py | Python | elevenclock/lang/lang_el.py | wanderleihuttel/ElevenClock | de4272a650111233acf36c909c7e269c8dc810d2 | ["Apache-2.0"] | null | null | null | elevenclock/lang/lang_el.py | wanderleihuttel/ElevenClock | de4272a650111233acf36c909c7e269c8dc810d2 | ["Apache-2.0"] | null | null | null | elevenclock/lang/lang_el.py | wanderleihuttel/ElevenClock | de4272a650111233acf36c909c7e269c8dc810d2 | ["Apache-2.0"] | null | null | null |
# INSTRUCTIONS
# Translate the text and write it between the quotation marks
# EXAMPLE: original -> "This text is in english: value {0}"
#          translation -> "Aquest text està en anglès: valor {0}"
# If you see something like {0} or {1}, keep it in the translated sentence
# Pay special attention to elements like ":", etc.
lang_3_2_1 = {
"Open online help to troubleshoot problems": "",
"Reset ElevenClock preferences to defaults": "",
"Specify a minimum width for the clock": "",
"Search on the settings": "",
"No results were found": "",
}
lang_3_2 = lang_3_2_1 | {
"Use system accent color as background color": "",
"Check only the focused window on the fullscreen check": "",
"Clock on monitor {0}": "",
"Move to the left": "",
"Show this clock on the left": "",
"Show this clock on the right": "",
"Restore clock position": "",
}
lang_3_1 = lang_3_2 | {
"W": "", # The initial of the word week in your language: W for week, S for setmana, etc.
"Disable the notification badge": "",
"Override clock default height": "",
"Adjust horizontal clock position": "",
"Adjust vertical clock position": "",
"Export log as a file": "",
"Copy log to clipboard": "",
"Announcements:": "",
"Fetching latest announcement, please wait...": "",
"Couldn't load the announcements. Please try again later": "",
"ElevenClock's log": "",
"Pick a color": ""
}
lang_3 = lang_3_1 | {
"Hide the clock during 10 seconds when clicked": "",
"Enable low-cpu mode": "",
"You might lose functionalities, like the notification counter or the dynamic background": "",
"Clock position and size:": "",
"Clock size preferences, position offset, clock at the left, etc.": "",
"Reset monitor blacklisting status": "",
"Reset": "",
"Third party licenses": "",
"View": "",
"ElevenClock": "",
"Monitor tools": "",
"Blacklist this monitor": "",
"Third Party Open-Source Software in Elevenclock {0} (And their licenses)": "",
"ElevenClock is an Open-Source application made with the help of other libraries made by the community:": "",
"Ok": "",
"More Info": "",
"About Qt": "",
"Success": "",
"The monitors were unblacklisted successfully.": "",
"Now you should see the clock everywhere": "",
"Ok": "",
"Blacklist Monitor": "",
"Blacklisting a monitor will hide the clock on this monitor permanently.": "",
"This action can be reverted from the settings window. under <b>Clock position and size</b>": "",
"Are you sure do you want to blacklist the monitor \"{0}\"?": "",
"Yes": "",
"No": "",
}
lang_2_9_2 = lang_3 | {
"Reload log": "",
"Do not show the clock on secondary monitors": "",
"Disable clock taskbar background color (make clock transparent)": "",
"Open the welcome wizard": "",
" (ALPHA STAGE, MAY NOT WORK)": "",
"Welcome to ElevenClock": "",
"Skip": "",
"Start": "",
"Next": "",
"Finish": "",
}
lang_2_9 = lang_2_9_2 | {
"Task Manager": "",
"Change date and time": "",
"Notification settings": "",
"Updates, icon tray, language": "",
"Hide extended options from the clock right-click menu (needs a restart to be aplied)": "",
"Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "",
'Add the "Show Desktop" button on the left corner of every clock': '',
'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': '',
"Clock's font, font size, font color and background, text alignment": "",
"Date format, Time format, seconds,weekday, weeknumber, regional settings": "",
"Testing features and error-fixing tools": "",
"Language pack author(s), help translating ElevenClock": "",
"Info, report a bug, submit a feature request, donate, about": "",
"Log, debugging information": "",
}
lang_2_8 = lang_2_9 | {
"Force the clock to be at the top of the screen": "",
"Show the clock on the primary screen": "",
"Use a custom font color": "",
"Use a custom background color": "",
"Align the clock text to the center": "",
"Select custom color": "",
"Hide the clock when a program occupies all screens": "",
}
lang2_7_bis = lang_2_8 | {
"Use a custom font": "",
"Use a custom font size": "",
"Enable hide when multi-monitor fullscreen apps are running": "",
"<b>{0}</b> needs to be enabled to change this setting": "",
"<b>{0}</b> needs to be disabled to change this setting": "",
}
lang2_7 = lang2_7_bis | {
" (This feature has been disabled because it should work by default. If it is not, please report a bug)": "",
"ElevenClock's language": ""
}
lang2_6 = lang2_7 | {
"About Qt6 (PySide6)": "",
"About": "",
"Alternative non-SSL update server (This might help with SSL errors)": "",
"Fixes and other experimental features: (Use ONLY if something is not working)": "",
"Show week number on the clock": "",
}
lang2_5 = lang2_6 | {
"Hide the clock when RDP Client or Citrix Workspace are running": "",
"Clock Appearance:": "",
"Force the clock to have black text": "",
" - It is required that the Dark Text checkbox is disabled": "",
"Debbugging information:": "",
"Open ElevenClock's log": "",
}
lang2_4 = lang2_5 | {
# Added text in version 2.4
"Show the clock on the primary screen (Useful if clock is set on the left)": "",
"Show weekday on the clock" :"Προβολή ημέρας της εβδομάδας στο ρολόι",
}
lang2_3 = lang2_4 | {
#Context menu
"ElevenClock Settings" :"Ρυθμίσεις ElevenClock", # Also settings title
"Reload Clocks" :"Επαναφόρτωση Ρολογιών",
"ElevenClock v{0}" :"Έκδοση ElevenClock: {0}",
"Restart ElevenClock" :"Επανεκκίνηση ElevenClock",
"Hide ElevenClock" :"Απόκρυψη ElevenClock",
"Quit ElevenClock" :"Τερματισμός ElevenClock",
#General settings section
"General Settings:" :"Γενικές Ρυθμίσεις",
"Automatically check for updates" :"Αυτόματος έλεγχος για ενημερώσεις",
"Automatically install available updates" :"Αυτόματη εγκατάσταση διαθέισμων ενημερώσεων",
"Enable really silent updates" :"Ενεργοποίηση πραγματικά σιωπηλών ενημερώσεων",
"Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"Παράκαμψη ελέγχου πιστοποίησης παρόχου ενημερώσεων (ΔΕΝ ΠΡΟΤΕΊΝΕΤΑΙ, ΜΕ ΔΙΚΗ ΣΑΣ ΕΥΘΥΝΗ)",
"Show ElevenClock on system tray" :"Προβολή του ElevenClock στη γραμμή εργασιών",
"Alternative clock alignment (may not work)" :"Εναλλακτική ευθυγράμμιση ρολογιού (ίσως να μην λειτουργεί)",
"Change startup behaviour" :"Αλλαγή συμπεριφοράς κατά την εκκίνηση",
"Change" :"Αλλαγή",
"<b>Update to the latest version!</b>" :"<b>Ενημέρωση στην τελευταία έκδοση!</b>",
"Install update" :"Εγκατάστσαη ενημέρωσης",
#Clock settings
"Clock Settings:" :"Ρυθμίσεις Ρολογιού",
"Hide the clock in fullscreen mode" :"Απόκρυψη ρολογιού σε κατάσταση πλήρους οθόνης",
"Hide the clock when RDP client is active" :"Απόκρυψη ρολογιού όταν χρησιμοποιείται η Απομακρυσμένη Πρόσβαση",
"Force the clock to be at the bottom of the screen" :"Εξαναγκασμός ρολογιού στο κάτω μέρος της οθόνης",
"Show the clock when the taskbar is set to hide automatically" :"Προβολή ρολογιού όταν η γραμμή εργασιών είναι ορισμένη για αυτόματη απόκρυψη",
"Fix the hyphen/dash showing over the month" :"Διόρθωση της καθέτου που προβάλεται πάνω από τον μήνα",
"Force the clock to have white text" :"Εξαναγκασμός ρολογίου για χρήση κειμένου σε λευκό χρώμα",
"Show the clock at the left of the screen" :"Προβολή ρολογιού στα αριστερά της οθόνης",
#Date & time settings
"Date & Time Settings:" :"Ρυθμίσεις Ημερομηνίας & Ώρας",
"Show seconds on the clock" :"Προβολή δευτερολέπτων στο ρολόι",
"Show date on the clock" :"Προβολή ημερομηνίας στο ρολόι",
"Show time on the clock" :"Προβολή ώρας στο ρολόι",
"Change date and time format (Regional settings)" :"Αλλαγή μορφής ημερομηνίας και ώρας (Τοπικές ρυθμίσεις)",
"Regional settings" :"Τοπικές ρυθμίσεις",
#About the language pack
"About the language pack:" :"Σχετικά με το πακέτο γλώσσας",
"Translated to English by martinet101" :"Μετάφραση ελληνικών από panos78", # Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
"Translate ElevenClock to your language" :"Μεταφραση του ElevenClock στη γλώσσα σας",
"Get started" :"Ξεκινήστε",
#About ElevenClock
"About ElevenClock version {0}:" :"Σχετικά με την έκδοση {0} του ElevenClock:",
"View ElevenClock's homepage" :"Μετάβαση στην ιστοσελίδα του ElevenClock",
"Open" :"Άνοιγμα",
"Report an issue/request a feature" :"Αναφορά θέματος / Αίτημα χαρακτηριστικού",
"Report" :"Αναφορά",
"Support the dev: Give me a coffee☕" :"Υποστηρίξτε τον δημιουργό: Κεράστε τον ένα καφέ☕",
"Open page" :"Άνοιγμα σελίδας",
"Icons by Icons8" :"Εικονίδια από Icons8", # Here, the word "Icons8" should not be translated
"Webpage" :"Ιστοσελίδα",
"Close settings" :"Κλείσιμο ρυθμίσεων",
"Close" :"Κλείσιμο",
}
lang = lang2_3
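# Note on the structure above (illustration only, not part of the language
# pack): each lang_X dict is built with the "|" dict-union operator
# (Python 3.9+), so every block merges the previous block's keys with its own
# additions, and keys on the right-hand side win on conflict. For example:
#
#   base = {"Open": "Άνοιγμα", "Close": ""}
#   newer = base | {"Close": "Κλείσιμο"}   # newer keeps "Open" and overrides "Close"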
| 50.110048 | 180 | 0.5835 |
d1fd8b73c4920fae582426473920c41effe5c00f | 500 | py | Python | jupyterurlparams/__init__.py | manics/jupyter-urlparams | 2e40927e6bf2e1b780e37d440cc7a463415da91d | ["BSD-3-Clause"] | 2 | 2020-03-12T18:21:19.000Z | 2020-03-13T22:27:39.000Z | jupyterurlparams/__init__.py | manics/jupyter-urlparams | 2e40927e6bf2e1b780e37d440cc7a463415da91d | ["BSD-3-Clause"] | null | null | null | jupyterurlparams/__init__.py | manics/jupyter-urlparams | 2e40927e6bf2e1b780e37d440cc7a463415da91d | ["BSD-3-Clause"] | null | null | null |
from .version import __version__  # noqa
from .handlers import (
UIHandler,
)
from notebook.utils import url_path_join
def _jupyter_server_extension_paths():
return [{
'module': 'jupyterurlparams',
}]
def load_jupyter_server_extension(nbapp):
web_app = nbapp.web_app
base_url = url_path_join(web_app.settings['base_url'], 'urlparams')
handlers = [
(base_url, UIHandler),
]
web_app.settings['nbapp'] = nbapp
web_app.add_handlers('.*', handlers)
| 22.727273 | 71 | 0.686 |
59818180ada892159244b8e7ca3cf197cf849760 | 6,864 | py | Python | server/database/models.py | FemiBlack/flask-vue-building-spa | a275da149ee60242170440fba0fd0dc0ecefe659 | ["MIT"] | null | null | null | server/database/models.py | FemiBlack/flask-vue-building-spa | a275da149ee60242170440fba0fd0dc0ecefe659 | ["MIT"] | null | null | null | server/database/models.py | FemiBlack/flask-vue-building-spa | a275da149ee60242170440fba0fd0dc0ecefe659 | ["MIT"] | null | null | null |
from .db import db
from flask_bcrypt import generate_password_hash,check_password_hash
# class RemCol(db.Document):
# remark = db.StringField()
# response = db.StringField()
class BuildingExtEnv(db.EmbeddedDocument):
drv_rain = db.DictField()
drainage_issue = db.DictField()
water_log = db.DictField() # read-docs
unkempt = db.DictField() # read-docs
pollution = db.DictField() # read-docs
topography = db.DictField() # read-docs
radiation = db.DictField() # read-docs
extreme_temp = db.DictField() # read-docs
flood = db.DictField() # read-docs
fire_source = db.DictField() # read-docs
traffic_issue = db.DictField() # read-docs
building_threat = db.DictField() # read-docs
wind = db.DictField() # read-docs
moisture = db.DictField() # read-docs
class BuildingIntCond(db.EmbeddedDocument):
moisture = db.DictField() # read-docs
excess_heat = db.DictField() # read-docs
ventilation = db.DictField() # read-docs
dry_air = db.DictField() # read-docs
class BuildingGenCond(db.EmbeddedDocument):
foundation_sett = db.DictField() # read-docs
deformation = db.DictField() # read-docs
defects = db.DictField() # read-docs
cracks = db.DictField() # read-docs
class BuildingQualityofComponent(db.EmbeddedDocument):
physical_app = db.DictField() # read-docs
texture = db.DictField() # read-docs
strength = db.DictField() # read-docs
crack = db.DictField() # read-docs
dimension = db.DictField() # read-docs
deflection = db.DictField() # read-docs
spalling = db.DictField() # read-docs
corrosion = db.DictField() # read-docs
structural_defect = db.DictField() # read-docs
distress = db.DictField() # read-docs
deformation = db.DictField() # read-docs
deterioration = db.DictField() # read-docs
class BuildingDesignLevel(db.EmbeddedDocument):
dimension = db.DictField() # read-docs
spanning = db.DictField() # read-docs
configuration = db.DictField() # read-docs
redundant_element = db.DictField() # read-docs
loading = db.DictField() # read-docs
structural_defect = db.DictField() # read-docs
deformation = db.DictField() # read-docs
class BuildingWorkXPLevel(db.EmbeddedDocument):
dimension = db.DictField() # read-docs
misalignment = db.DictField() # read-docs
deflection = db.DictField() # read-docs
excess_waviness = db.DictField() # read-docs
corossion = db.DictField() # read-docs
bar_spacing = db.DictField() # read-docs
deficient_cover = db.DictField() # read-docs
reinforcement_spec = db.DictField() # read-docs
seq_construction = db.DictField() # read-docs
class BuildingIndoorEnv(db.EmbeddedDocument):
moisture = db.DictField() # read-docs
humidity = db.DictField() # read-docs
vibration = db.DictField() # read-docs
excess_heat = db.DictField() # read-docs
ventilation = db.DictField() # read-docs
lighting = db.DictField() # read-docs
class BuildingOutdoorEnv(db.EmbeddedDocument):
drainage_issue = db.DictField() # read-docs
flood_issue = db.DictField() # read-docs
heat = db.DictField() # read-docs
traffic_issue = db.DictField() # read-docs
drv_rain = db.DictField() # read-docs
unkempt = db.DictField() # read-docs
pollution = db.DictField() # read-docs
extreme_temp = db.DictField() # read-docs
building_threat = db.DictField() # read-docs
class BuildingInUseCond(db.EmbeddedDocument):
addition = db.DictField() # read-docs
overloading = db.DictField() # read-docs
not_kept = db.DictField() # read-docs
vibration = db.DictField() # read-docs
vandalism = db.DictField() # read-docs
residential_only = db.DictField() # read-docs
class BuildingMaintenance(db.EmbeddedDocument):
int_env = db.DictField() # read-docs
ext_env = db.DictField() # read-docs
struct_elements = db.DictField() # read-docs
maintenance_issue = db.DictField() # read-docs
damage_maintenance = db.DictField() # read-docs
care_takers = db.DictField() # read-docs
planned_frequency = db.DictField() # read-docs
class NDTestRes(db.EmbeddedDocument):
code = db.StringField()
grid = db.StringField()
ultrasonic = db.DictField() # read-docs
eq_strength = db.IntField()
hammer_val = db.IntField()
class BuildingWeatherTemp(db.EmbeddedDocument):
temp_17 = db.DictField()
temp_18 = db.DictField()
temp_19 = db.DictField()
temp_20 = db.DictField()
class BuildingWeatherRain(db.EmbeddedDocument):
rain_17 = db.DictField()
rain_18 = db.DictField()
rain_19 = db.DictField()
rain_20 = db.DictField()
class Building(db.Document):
building_no = db.StringField(required=True, unique=True)
address = db.StringField(required=True)
date = db.DateTimeField(required=True)
building_age = db.IntField()
last_repair_date = db.DateTimeField()
nature_of_repair = db.StringField()
frequency_of_repair = db.StringField()
geometry = db.StringField()
characteristics = db.StringField()
compliance = db.StringField()
deviation = db.StringField()
external_env = db.EmbeddedDocumentField(BuildingExtEnv)
internal_cond = db.EmbeddedDocumentField(BuildingIntCond)
general_being = db.EmbeddedDocumentField(BuildingGenCond)
component_quality = db.EmbeddedDocumentField(BuildingQualityofComponent)
design_lvl = db.EmbeddedDocumentField(BuildingDesignLevel)
work_xp_lvl = db.EmbeddedDocumentField(BuildingWorkXPLevel)
indoor_env = db.EmbeddedDocumentField(BuildingIndoorEnv)
outdoor_env = db.EmbeddedDocumentField(BuildingOutdoorEnv)
in_use_cond = db.EmbeddedDocumentField(BuildingInUseCond)
maintenance = db.EmbeddedDocumentField(BuildingMaintenance)
nd_test_res = db.EmbeddedDocumentField(NDTestRes)
weather_info_temp = db.EmbeddedDocumentField(BuildingWeatherTemp)
weather_info_rain = db.EmbeddedDocumentField(BuildingWeatherRain)
is_completed = db.BooleanField(default=False) # set to true on FIELD4 SUBMISSION
added_by = db.ReferenceField('User')
class User(db.Document):
email = db.EmailField(required=True, unique=True)
username = db.StringField(required=True)
password = db.StringField(required=True, min_length=6)
houses = db.ListField(db.ReferenceField('Building', reverse_delete_rule=db.PULL))
def hash_password(self):
self.password = generate_password_hash(self.password).decode('utf8')
def check_password(self, password):
return check_password_hash(self.password, password)
User.register_delete_rule(Building, 'added_by', db.CASCADE)
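# A minimal sketch (flask_bcrypt only, no database connection needed) of the
# password round trip implemented by User.hash_password / User.check_password
# above; 's3cret' is just a placeholder value.
if __name__ == '__main__':
    hashed = generate_password_hash('s3cret').decode('utf8')
    print(check_password_hash(hashed, 's3cret'))   # -> True
    print(check_password_hash(hashed, 'wrong'))    # -> False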
| 41.101796 | 86 | 0.688228 |
be9979bb3af15007831026978a63090be003432a | 147 | py | Python | happy/fun/urls.py | 0xRumple/happy | bc4be2ae6320281887125d9a19cfa62a58a83331 | ["Apache-2.0"] | 7 | 2018-06-05T13:50:10.000Z | 2021-08-04T12:13:53.000Z | happy/fun/urls.py | 0xRumple/happy | bc4be2ae6320281887125d9a19cfa62a58a83331 | ["Apache-2.0"] | 67 | 2018-06-13T15:49:35.000Z | 2021-06-10T20:32:08.000Z | happy/fun/urls.py | 0xRumple/happy | bc4be2ae6320281887125d9a19cfa62a58a83331 | ["Apache-2.0"] | 7 | 2018-06-05T13:50:25.000Z | 2019-04-01T08:28:24.000Z |
# from django.urls import path, include
# from rest_framework.urlpatterns import format_suffix_patterns
# from . import views
urlpatterns = [
]
| 16.333333 | 63 | 0.77551 |
672f707120b9c828f99f25790702b47d2efc0e95 | 383 | py | Python | Yeps/Yeps/wsgi.py | hezuoguang/Yeps-Server | 04c9bc9674fc93f583a46fb4b4197ea1855e5fb7 | ["MIT"] | 1 | 2017-06-08T03:15:53.000Z | 2017-06-08T03:15:53.000Z | Yeps/Yeps/wsgi.py | hezuoguang/Yeps-Server | 04c9bc9674fc93f583a46fb4b4197ea1855e5fb7 | ["MIT"] | null | null | null | Yeps/Yeps/wsgi.py | hezuoguang/Yeps-Server | 04c9bc9674fc93f583a46fb4b4197ea1855e5fb7 | ["MIT"] | null | null | null |
"""
WSGI config for Yeps project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Yeps.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 25.533333 | 78 | 0.785901 |
7d09ff9e14923067e653ca7b7a251c840fd6789c | 316,766 | py | Python | theano/tensor/tests/test_basic.py | gundun/theano | 09d17fff10487dca7149e34601b8c6efdc572a19 | ["BSD-3-Clause"] | null | null | null | theano/tensor/tests/test_basic.py | gundun/theano | 09d17fff10487dca7149e34601b8c6efdc572a19 | ["BSD-3-Clause"] | null | null | null | theano/tensor/tests/test_basic.py | gundun/theano | 09d17fff10487dca7149e34601b8c6efdc572a19 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import, print_function, division
import itertools
import logging
import operator
import os
import sys
from tempfile import mkstemp
import unittest
import warnings
from copy import copy, deepcopy
# Import builtin min to be able to use it after importing the tensor version.
from theano.compat import izip
from six import iteritems
from six.moves import xrange
from six.moves.builtins import min as builtin_min
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
import numpy
from numpy.testing import dec, assert_array_equal, assert_allclose
from distutils.version import LooseVersion
from functools import partial
import theano
from theano.compat import PY3, exc_message, operator_div
from six.moves import StringIO, reduce
from theano import compile, config, function, gof, tensor, shared
from theano.compile import DeepCopyOp
from theano.compile.mode import get_default_mode
from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
argmin, max_and_argmax, cscalar, ctensor3, join,
horizontal_stack, vertical_stack, argmax, get_vector_length,
fscalar, zeros_like, sum, tensor3, vector, add, addbroadcast,
alloc, as_tensor_variable, tensor_from_scalar, ARange, autocast_float,
clip, constant, default, dot, batched_dot,
dmatrix, dscalar, dvector, eq, eye, fill, flatten, inverse_permutation,
tensor4, permute_row_elements, Flatten, fmatrix, fscalars, grad,
inplace, iscalar, matrix, minimum, matrices, maximum, mul, neq,
Reshape, row, scalar, scalars, second, smallest, stack, sub, Tensor,
tensor_copy, tensordot, TensorType, Tri, tri, tril, triu, unbroadcast,
var, Join, shape, MaxAndArgmax, lscalar, zvector, exp,
get_scalar_constant_value, ivector, reshape, scalar_from_tensor, scal,
iscalars, arange, dscalars, fvector, imatrix, numeric_grad,
opt, lvector, lmatrix, true_div, max, min, Split, roll,
tile, patternbroadcast, Eye, Shape, Dot, PermuteRowElements,
ScalarFromTensor, TensorFromScalar, dtensor4, Rebroadcast, Alloc,
dtensor3, SpecifyShape, Mean,
itensor3, Tile, switch, Diagonal, Diag,
nonzero, flatnonzero, nonzero_values,
stacklists, DimShuffle, hessian, ptp, power,
swapaxes, choose, Choose, NoneConst, AllocEmpty,
isclose, allclose, mgrid, ogrid, extract_constant,
)
from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import attr
imported_scipy_special = False
mode_no_scipy = get_default_mode()
try:
import scipy.special
import scipy.stats
from scipy import __version__ as scipy_version
imported_scipy_special = True
except ImportError:
if config.mode == "FAST_COMPILE":
mode_no_scipy = "FAST_RUN"
floatX = config.floatX
if config.mode == "FAST_COMPILE":
mode_opt = "FAST_RUN"
else:
mode_opt = get_default_mode()
### seed random number generator so that unittests are deterministic ###
utt.seed_rng()
if PY3:
def L(i):
return i
else:
def L(i):
return long(i)
def inplace_func(inputs, outputs, mode=None, allow_input_downcast=False,
on_unused_input='raise', name=None):
if mode is None:
mode = get_default_mode()
return function(inputs, outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name)
def eval_outputs(outputs):
variables = inplace_func([], outputs)()
if isinstance(variables, (tuple, list)) and len(variables) == 1:
return variables[0]
return variables
def get_numeric_subclasses(cls=numpy.number, ignore=None):
"""
Return subclasses of `cls` in the numpy scalar hierarchy.
We only return subclasses that correspond to unique data types.
The hierarchy can be seen here:
http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html
"""
if ignore is None:
ignore = []
rval = []
dtype = numpy.dtype(cls)
dtype_num = dtype.num
if dtype_num not in ignore:
# Safety check: we should be able to represent 0 with this data type.
numpy.array(0, dtype=dtype)
rval.append(cls)
ignore.append(dtype_num)
for sub in cls.__subclasses__():
rval += [c for c in get_numeric_subclasses(sub, ignore=ignore)]
return rval
def get_numeric_types(with_int=True, with_float=True, with_complex=False,
only_theano_types=True):
"""
Return numpy numeric data types.
:param with_int: Whether to include integer types.
:param with_float: Whether to include floating point types.
:param with_complex: Whether to include complex types.
:param only_theano_types: If True, then numpy numeric data types that are
not supported by Theano are ignored (i.e. those that are not declared in
scalar/basic.py).
:returns: A list of unique data type objects. Note that multiple data types
may share the same string representation, but can be differentiated through
their `num` attribute.
Note that when `only_theano_types` is True we could simply return the list
of types defined in the `scalar` module. However with this function we can
test more unique dtype objects, and in the future we may use it to
automatically detect new data types introduced in numpy.
"""
if only_theano_types:
theano_types = [d.dtype for d in theano.scalar.all_types]
rval = []
def is_within(cls1, cls2):
# Return True if scalars defined from `cls1` are within the hierarchy
# starting from `cls2`.
# The third test below is to catch for instance the fact that
# one can use ``dtype=numpy.number`` and obtain a float64 scalar, even
# though `numpy.number` is not under `numpy.floating` in the class
# hierarchy.
return (cls1 is cls2 or
issubclass(cls1, cls2) or
isinstance(numpy.array([0], dtype=cls1)[0], cls2))
for cls in get_numeric_subclasses():
dtype = numpy.dtype(cls)
if ((not with_complex and is_within(cls, numpy.complexfloating)) or
(not with_int and is_within(cls, numpy.integer)) or
(not with_float and is_within(cls, numpy.floating)) or
(only_theano_types and dtype not in theano_types)):
# Ignore this class.
continue
rval.append([str(dtype), dtype, dtype.num])
# We sort it to be deterministic, then remove the string and num elements.
return [x[1] for x in sorted(rval, key=str)]
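# Illustrative usage of get_numeric_types (a sketch added for clarity, not part
# of the test machinery): with the default arguments it returns the numpy dtype
# objects that Theano also declares in theano.scalar, roughly along the lines of
#   >>> sorted(str(d) for d in get_numeric_types())
#   ['float16', 'float32', 'float64', 'int16', 'int32', ...]
# The exact list depends on the installed numpy and on theano.scalar.all_types.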
def _numpy_checker(x, y):
"""
Checks if x.data and y.data have the same contents.
Used in DualLinker to compare C version with Python version.
"""
x, y = x[0], y[0]
if (x.dtype != y.dtype or x.shape != y.shape
or numpy.any(numpy.abs(x - y) > 1e-10)):
raise Exception("Output mismatch.", {'performlinker': x, 'clinker': y})
def safe_make_node(op, *inputs):
""" Emulate the behaviour of make_node when op is a function.
    Normally `op` is an instance of an Op class rather than a plain function.
"""
node = op(*inputs)
if isinstance(node, list):
return node[0].owner
else:
return node.owner
def upcast_float16_ufunc(fn):
"""Decorator that enforces computation is not done in float16 by NumPy.
Some ufuncs in NumPy will compute float values on int8 and uint8
    in half-precision (float16), which is not precise enough and is not compatible
with the C code.
:param fn: numpy ufunc
:returns: function similar to fn.__call__, computing the same
value with a minimum floating-point precision of float32
"""
def ret(*args, **kwargs):
out_dtype = numpy.find_common_type(
[a.dtype for a in args], [numpy.float16])
if out_dtype == 'float16':
# Force everything to float32
sig = 'f' * fn.nin + '->' + 'f' * fn.nout
kwargs.update(sig=sig)
return fn(*args, **kwargs)
return ret
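# Quick sketch of what the decorator above changes (illustration only, not
# executed as part of the test suite): wrapping numpy.exp forces at least
# float32 precision when all inputs are int8/uint8.
#   >>> exp32 = upcast_float16_ufunc(numpy.exp)
#   >>> exp32(numpy.arange(3, dtype='int8')).dtype
#   dtype('float32')
# Without the wrapper, some numpy versions would compute this in float16.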
def upcast_int8_nfunc(fn):
"""Decorator that upcasts input of dtype int8 to float32.
This is so that floating-point computation is not carried using
half-precision (float16), as some NumPy functions do.
:param fn: function computing a floating-point value from inputs
:returns: function similar to fn, but upcasting its uint8 and int8
inputs before carrying out the computation.
"""
def ret(*args, **kwargs):
args = list(args)
for i, a in enumerate(args):
if getattr(a, 'dtype', None) in ('int8', 'uint8'):
args[i] = a.astype('float32')
return fn(*args, **kwargs)
return ret
def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
bad_runtime=None, grad=None, mode=None, grad_rtol=None,
eps=1e-10, skip=False, test_memmap=True, check_name=True,
grad_eps=None):
"""
:param check_name:
        Use only for testers that aren't defined in Theano.
"""
if checks is None:
checks = {}
if good is None:
good = {}
if bad_build is None:
bad_build = {}
if bad_runtime is None:
bad_runtime = {}
if grad is None:
grad = {}
if grad is True:
grad = good
_op, _expected, _checks, _good = op, expected, checks, good
_bad_build, _bad_runtime, _grad = bad_build, bad_runtime, grad
_mode, _grad_rtol, _eps, skip_ = mode, grad_rtol, eps, skip
_test_memmap = test_memmap
_check_name = check_name
_grad_eps = grad_eps
class Checker(unittest.TestCase):
op = staticmethod(_op)
expected = staticmethod(_expected)
checks = _checks
check_name = _check_name
good = _good
bad_build = _bad_build
bad_runtime = _bad_runtime
grad = _grad
mode = _mode
skip = skip_
test_memmap = _test_memmap
def setUp(self):
# Verify that the test's name is correctly set.
# Some tests reuse it outside this module.
if self.check_name:
eval(self.__class__.__module__ + '.' + self.__class__.__name__)
# We keep a list of temporary files created in add_memmap_values,
# to remove them at the end of the test.
self.tmp_files = []
def add_memmap_values(self, val_dict):
# If test_memmap is True, we create a temporary file
# containing a copy of the data passed in the "val_dict" dict,
# then open it as a memmapped array, and we can use the result as a
# new test value.
if not self.test_memmap:
return val_dict
# Copy dict before modifying them
val_dict = val_dict.copy()
# Note that we sort items in the dictionary to ensure tests are
# deterministic (since the loop below will break on the first valid
# item that can be memmapped).
for k, v in sorted(val_dict.items()):
new_k = '_'.join((k, 'memmap'))
if new_k in val_dict:
# A corresponding key was already provided
break
new_v = []
for inp in v:
if type(inp) is numpy.ndarray and inp.size > 0:
f, fname = mkstemp()
self.tmp_files.append((f, fname))
new_inp = numpy.memmap(fname, dtype=inp.dtype,
mode='w+', shape=inp.shape)
new_inp[...] = inp[...]
new_v.append(new_inp)
else:
new_v.append(inp)
val_dict[new_k] = new_v
# We only need one value, no need to copy all of them
break
return val_dict
def tearDown(self):
# This is to avoid a problem with deleting memmap files on windows.
import gc
gc.collect()
for f, fname in self.tmp_files:
os.close(f)
os.remove(fname)
def test_good(self):
if skip:
raise SkipTest(skip)
good = self.add_memmap_values(self.good)
for testname, inputs in iteritems(good):
inputs = [copy(input) for input in inputs]
inputrs = [TensorType(
dtype=input.dtype,
broadcastable=[shape_elem == 1
for shape_elem in input.shape]
)() for input in inputs]
try:
node = safe_make_node(self.op, *inputrs)
except Exception as exc:
err_msg = ("Test %s::%s: Error occurred while"
" making a node with inputs %s") % (
self.op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f = inplace_func(inputrs, node.outputs, mode=mode, name='test_good')
except Exception as exc:
err_msg = ("Test %s::%s: Error occurred while"
" trying to make a Function") % (self.op, testname)
exc.args += (err_msg,)
raise
if (isinstance(self.expected, dict)
and testname in self.expected):
expecteds = self.expected[testname]
# with numpy version, when we print a number and read it
# back, we don't get exactly the same result, so we accept
# rounding error in that case.
eps = 5e-9
else:
expecteds = self.expected(*inputs)
eps = 1e-10
if any([i.dtype in ('float32', 'int8', 'uint8')
for i in inputs]):
eps = 1e-6
eps = numpy.max([eps, _eps])
try:
variables = f(*inputs)
except Exception as exc:
err_msg = ("Test %s::%s: Error occurred while calling"
" the Function on the inputs %s") % (
self.op, testname, inputs)
exc.args += (err_msg,)
raise
if not isinstance(expecteds, (list, tuple)):
expecteds = (expecteds, )
for i, (variable, expected) in enumerate(
izip(variables, expecteds)):
if (variable.dtype != expected.dtype
or variable.shape != expected.shape
or not numpy.allclose(variable, expected,
atol=eps, rtol=eps)):
self.fail(("Test %s::%s: Output %s gave the wrong"
" value. With inputs %s, expected %s (dtype %s),"
" got %s (dtype %s). eps=%f"
" numpy.allclose returns %s %s") % (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
eps,
numpy.allclose(variable, expected,
atol=eps, rtol=eps),
numpy.allclose(variable, expected)))
for description, check in iteritems(self.checks):
if not check(inputs, variables):
self.fail(("Test %s::%s: Failed check: %s (inputs"
" were %s, outputs were %s)") % (
self.op, testname, description,
inputs, variables))
def test_bad_build(self):
if skip:
raise SkipTest(skip)
for testname, inputs in iteritems(self.bad_build):
inputs = [copy(input) for input in inputs]
inputrs = [shared(input) for input in inputs]
self.assertRaises(Exception,
safe_make_node, self.op, *inputrs)
# The old error string was ("Test %s::%s: %s was successfully
# instantiated on the following bad inputs: %s"
# % (self.op, testname, node, inputs))
def test_bad_runtime(self):
if skip:
raise SkipTest(skip)
for testname, inputs in iteritems(self.bad_runtime):
inputrs = [shared(input) for input in inputs]
try:
node = safe_make_node(self.op, *inputrs)
except Exception as exc:
err_msg = ("Test %s::%s: Error occurred while trying"
" to make a node with inputs %s") % (
self.op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f = inplace_func([], node.outputs, mode=mode, name="test_bad_runtime")
except Exception as exc:
err_msg = ("Test %s::%s: Error occurred while trying"
" to make a Function") % (self.op, testname)
exc.args += (err_msg,)
raise
                # The Add tester raises a ValueError. Should we catch only
                # this one?
# TODO: test that only this one is raised and catch only this
# one or the subset that get raised.
self.assertRaises(Exception, f, [])
def test_grad(self):
if skip:
raise SkipTest(skip)
# Disable old warning that may be triggered by this test.
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
for testname, inputs in iteritems(self.grad):
inputs = [copy(input) for input in inputs]
try:
utt.verify_grad(self.op, inputs,
mode=self.mode,
rel_tol=_grad_rtol,
eps=_grad_eps)
except Exception as exc:
err_msg = ("Test %s::%s: Error occurred while"
" computing the gradient on the following"
" inputs: %s") % (self.op, testname, inputs)
exc.args += (err_msg,)
raise
finally:
config.warn.sum_div_dimshuffle_bug = backup
def test_grad_none(self):
# Check that None is never returned as input gradient
# when calling self.op.grad
# We use all values in self.good because this has to be true
# whether or not the values work for utt.verify_grad.
if skip:
raise SkipTest(skip)
if not hasattr(self.op, 'grad'):
# This is not actually an Op
return
for testname, inputs in iteritems(self.good):
inputs = [copy(input) for input in inputs]
inputrs = [TensorType(
dtype=input.dtype,
broadcastable=[shape_elem == 1
for shape_elem in input.shape]
)() for input in inputs]
if (isinstance(self.expected, dict)
and testname in self.expected):
expecteds = self.expected[testname]
# with numpy version, when we print a number and read it
# back, we don't get exactly the same result, so we accept
# rounding error in that case.
else:
expecteds = self.expected(*inputs)
if not isinstance(expecteds, (list, tuple)):
expecteds = (expecteds, )
out_grad_vars = []
for out in expecteds:
if str(out.dtype) in tensor.discrete_dtypes:
dtype = floatX
else:
dtype = str(out.dtype)
bcast = [shape_elem == 1 for shape_elem in out.shape]
var = TensorType(dtype=dtype, broadcastable=bcast)()
out_grad_vars.append(var)
try:
in_grad_vars = self.op.grad(inputrs, out_grad_vars)
except (gof.utils.MethodNotDefined, NotImplementedError):
pass
else:
assert None not in in_grad_vars
Checker.__name__ = name
if hasattr(Checker, '__qualname__'):
Checker.__qualname__ = name
return Checker
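# Rough sketch of how makeTester is used (illustration only; the real testers
# below are built the same way). It returns a unittest.TestCase subclass whose
# test_good / test_bad_* / test_grad methods run `op` on each named tuple of
# inputs and compare the result against `expected`.
#   >>> ExampleAddTester = makeTester(
#   ...     name='ExampleAddTester', op=add,
#   ...     expected=lambda x, y: x + y,
#   ...     good=dict(same_shapes=(rand(2, 3), rand(2, 3))),
#   ...     check_name=False)
# `ExampleAddTester` is a hypothetical name, not one of the testers defined in
# this file.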
def rand(*shape):
r = numpy.random.rand(*shape) * 2 - 1
return numpy.asarray(r, dtype=config.floatX)
def rand_nonzero(shape, eps=3e-4):
"""Like rand, but the absolute value has to be at least eps"""
# covers [0, 1)
r = numpy.asarray(numpy.random.rand(*shape), dtype=config.floatX)
# covers [0, (1 - eps) / 2) U [(1 + eps) / 2, 1)
r = r * (1 - eps) + eps * (r >= 0.5)
# covers [-1, -eps) U [eps, 1)
r = r * 2 - 1
return r
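# The three steps above are meant to guarantee that every sample satisfies
# |x| >= eps (up to floating-point rounding). Sketch of the invariant
# (illustration only, not executed by the suite):
#   >>> x = rand_nonzero((100, 100))
#   >>> bool(numpy.all(numpy.abs(x) > 2e-4))
#   True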
def randint(*shape):
return numpy.random.randint(-5, 6, shape)
def randuint(*shape):
return numpy.array(numpy.random.randint(5, size=shape), dtype=numpy.uint32)
# XXX: this so-called complex random array has all-zero imaginary parts
def randcomplex(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype=config.floatX)
return numpy.complex128(2 * r - 1)
def randcomplex_nonzero(shape, eps=1e-4):
return numpy.complex128(rand_nonzero(shape, eps))
def randint_nonzero(*shape):
r = numpy.random.randint(-5, 5, shape)
return r + (r == 0) * 5
def rand_ranged(min, max, shape):
return numpy.asarray(numpy.random.rand(*shape) * (max - min) + min,
dtype=config.floatX)
def randint_ranged(min, max, shape):
return numpy.random.randint(min, max+1, shape)
def randc128_ranged(min, max, shape):
return numpy.asarray(numpy.random.rand(*shape) * (max - min) + min,
dtype='complex128')
def rand_of_dtype(shape, dtype):
if 'int' in dtype:
return randint(*shape).astype(dtype)
elif 'float' in dtype:
return rand(*shape).astype(dtype)
elif 'complex' in dtype:
return randcomplex(*shape).astype(dtype)
else:
raise TypeError()
def makeBroadcastTester(op, expected, checks=None, name=None, **kwargs):
if checks is None:
checks = {}
if name is None:
name = str(op)
# Here we ensure the test name matches the name of the variable defined in
# this script. This is needed to properly identify the test e.g. with the
# --with-id option of nosetests, or simply to rerun a specific test that
# failed.
capitalize = False
if name.startswith('Elemwise{') and name.endswith(',no_inplace}'):
# For instance: Elemwise{add,no_inplace} -> Add
name = name[9:-12]
capitalize = True
elif name.endswith('_inplace'):
# For instance: sub_inplace -> SubInplace
capitalize = True
if capitalize:
name = ''.join([x.capitalize() for x in name.split('_')])
# Some tests specify a name that already ends with 'Tester', while in other
# cases we need to add it manually.
if not name.endswith('Tester'):
name += "Tester"
if 'inplace' in kwargs:
if kwargs['inplace']:
_expected = expected
if not isinstance(_expected, dict):
expected = lambda *inputs: numpy.array(_expected(*inputs),
dtype=inputs[0].dtype)
def inplace_check(inputs, outputs):
# this used to be inputs[0] is output[0]
# I changed it so that it was easier to satisfy by the
# DebugMode
return numpy.all(inputs[0] == outputs[0])
checks = dict(checks, inplace_check=inplace_check)
del kwargs['inplace']
return makeTester(name, op, expected, checks, **kwargs)
_good_broadcast_binary_normal = dict(
same_shapes=(rand(2, 3), rand(2, 3)),
not_same_dimensions=(rand(2, 2), rand(2)),
scalar=(rand(2, 3), rand(1, 1)),
row=(rand(2, 3), rand(1, 3)),
column=(rand(2, 3), rand(2, 1)),
integers=(randint(2, 3), randint(2, 3)),
dtype_mixup_1=(rand(2, 3), randint(2, 3)),
dtype_mixup_2=(randint(2, 3), rand(2, 3)),
complex1=(randcomplex(2, 3), randcomplex(2, 3)),
complex2=(randcomplex(2, 3), rand(2, 3)),
# Disabled as we test the case where we reuse the same output as the
# first inputs.
# complex3=(rand(2,3),randcomplex(2,3)),
empty=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([1], dtype=config.floatX)),
)
_bad_build_broadcast_binary_normal = dict()
_bad_runtime_broadcast_binary_normal = dict(
bad_shapes=(rand(2, 3), rand(3, 2)),
bad_row=(rand(2, 3), rand(1, 2)))
_grad_broadcast_binary_normal = dict(
same_shapes=(rand(2, 3), rand(2, 3)),
scalar=(rand(2, 3), rand(1, 1)),
row=(rand(2, 3), rand(1, 3)),
column=(rand(2, 3), rand(2, 1)),
    # This doesn't work, as verify_grad doesn't support that
#empty=(numpy.asarray([]), numpy.asarray([1]))
# complex1=(randcomplex(2,3),randcomplex(2,3)),
# complex2=(randcomplex(2,3),rand(2,3)),
# Disabled as we test the case where we reuse the same output as the
# first inputs.
# complex3=(rand(2,3),randcomplex(2,3)),
)
def check_floatX(inputs, rval):
"""
:param inputs: Inputs to a function that returned `rval` with these inputs.
:param rval: Value returned by a function with inputs set to `inputs`.
:returns: Either `rval` unchanged, or `rval` cast in float32. The idea is
that when a numpy function would have returned a float64, Theano may prefer
to return a float32 instead when `config.cast_policy` is set to
'numpy+floatX' and config.floatX to 'float32', and there was no float64
input.
"""
if (isinstance(rval, numpy.ndarray) and
rval.dtype == 'float64' and
config.cast_policy == 'numpy+floatX'
and config.floatX == 'float32' and
all(x.dtype != 'float64' for x in inputs)):
# Then we expect float32 instead of float64.
return rval.astype('float32')
else:
return rval
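# Illustrative behaviour of check_floatX (sketch only): under
# cast_policy='numpy+floatX' and floatX='float32', a float64 value computed by
# numpy from float32 inputs is downcast so it can be compared with Theano's
# float32 output:
#   >>> x = rand(2, 3)                                      # float32 inputs
#   >>> check_floatX((x, x), numpy.float64(1.0) * x).dtype
#   dtype('float32')
# In any other configuration the value is returned unchanged.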
AddTester = makeBroadcastTester(
op=add,
expected=lambda *inputs: check_floatX(
inputs, reduce(lambda x, y: x + y, inputs)),
good=dict(
three_inputs_same_shapes=(rand(2, 3),
rand(2, 3),
rand(2, 3)),
three_inputs_same_shapes_uint=(randuint(2,3),
randuint(2,3),
randuint(2,3)),
four_inputs_broadcast=(rand(2, 3),
rand(1, 3),
rand(2, 1),
rand(1, 1)),
**_good_broadcast_binary_normal),
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal)
AddInplaceTester = makeBroadcastTester(
op=inplace.add_inplace,
expected=lambda x, y: x + y,
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
inplace=True)
SubTester = makeBroadcastTester(
op=sub,
expected=lambda x, y: check_floatX((x, y), x - y),
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal)
SubInplaceTester = makeBroadcastTester(op=inplace.sub_inplace,
expected=lambda x, y: x - y,
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal,
inplace=True)
SwitchTester = makeBroadcastTester(
op=switch,
expected=numpy.where,
good=dict(all_true=(numpy.asarray(1, dtype=config.floatX),
rand(4, 5), rand(4, 5)),
false_true=(numpy.asarray(0, dtype=config.floatX),
rand(4, 5), rand(4, 5)),
mixed=(randint_ranged(0, 1, (4, 5)),
rand(4, 5), rand(4, 5))
),
bad_build=dict(all_true=(numpy.asarray(1, dtype=config.floatX),
rand(4, 5))),
bad_runtime=dict(all_true=(numpy.asarray(1, dtype=config.floatX),
rand(3, 5), rand(4, 5)),
false_true=(numpy.asarray(0, dtype=config.floatX),
rand(4, 6), rand(4, 5)),
),
# We suppose that cond+eps do not switch branch in switch.grad()
# So we can't call verify_grad with cond 0.
grad=dict(all_true=(numpy.asarray(1, dtype=config.floatX),
rand(4, 5), rand(4, 5)),
# false_true=(numpy.asarray(0, dtype=config.floatX),
# rand(4, 5), rand(4, 5)),
# mixed=(randint_ranged(0, 1, (4, 5)).astype(config.floatX),
# rand(4, 5), rand(4, 5))
),
)
MaximumTester = makeBroadcastTester(op=maximum,
expected=lambda *inputs: check_floatX(inputs, numpy.maximum(*inputs)),
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal)
MaximumInplaceTester = makeBroadcastTester(op=inplace.maximum_inplace,
expected=numpy.maximum,
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal,
inplace=True)
MinimumTester = makeBroadcastTester(op=minimum,
expected=lambda *inputs: check_floatX(inputs, numpy.minimum(*inputs)),
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal)
MinimumInplaceTester = makeBroadcastTester(op=inplace.minimum_inplace,
expected=numpy.minimum,
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal,
inplace=True)
MulTester = makeBroadcastTester(op=mul,
expected=lambda *inputs: check_floatX(inputs, reduce(lambda x, y: x * y, inputs)),
good=dict(three_inputs_same_shapes=(rand(2, 3), rand(2, 3), rand(2, 3)),
four_inputs_broadcast=(rand(2, 3), rand(1, 3), rand(2, 1), rand(1, 1)),
**_good_broadcast_binary_normal),
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=dict(three_inputs_same_shapes=(rand(2, 3), rand(2, 3), rand(2, 3)),
four_inputs_broadcast=(rand(2, 3), rand(1, 3), rand(2, 1), rand(1, 1)),
**_grad_broadcast_binary_normal))
MulInplaceTester = makeBroadcastTester(op=inplace.mul_inplace,
expected=lambda x, y: x * y,
good=_good_broadcast_binary_normal,
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=_grad_broadcast_binary_normal,
inplace=True)
def copymod(dct, without=None, **kwargs):
"""Return dct but with the keys named by args removed, and with
kwargs added.
"""
if without is None:
without = []
rval = copy(dct)
for a in without:
if a in rval:
del rval[a]
for kw, val in iteritems(kwargs):
rval[kw] = val
return rval
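# Illustrative usage of copymod (sketch only): start from an existing dict of
# test values, drop some entries and add others without mutating the original.
#   >>> d = dict(a=1, b=2)
#   >>> copymod(d, without=['b'], c=3)
#   {'a': 1, 'c': 3}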
_good_broadcast_div_mod_normal_float_no_complex = dict(
same_shapes=(rand(2, 3), rand_nonzero((2, 3))),
scalar=(rand(2, 3), rand_nonzero((1, 1))),
row=(rand(2, 3), rand_nonzero((1, 3))),
column=(rand(2, 3), rand_nonzero((2, 1))),
dtype_mixup_1=(rand(2, 3), randint_nonzero(2, 3)),
dtype_mixup_2=(randint_nonzero(2, 3), rand_nonzero((2, 3))),
integer=(randint(2, 3), randint_nonzero(2, 3)),
uinteger=(randint(2, 3).astype("uint8"),
randint_nonzero(2, 3).astype("uint8")),
int8=[numpy.tile(numpy.arange(-127, 128, dtype='int8'), [254, 1]).T,
numpy.tile(numpy.array(list(range(-127, 0)) + list(range(1, 128)),
dtype='int8'),
[255, 1])],
# This empty2 doesn't work for some tests. I don't remember why
#empty2=(numpy.asarray([0]), numpy.asarray([])),
)
if PY3:
_good_broadcast_div_mod_normal_float_inplace = copymod(
_good_broadcast_div_mod_normal_float_no_complex,
empty1=(numpy.asarray([]), numpy.asarray([1])),
# No complex floor division in python 3.x
)
else:
_good_broadcast_div_mod_normal_float_inplace = copymod(
_good_broadcast_div_mod_normal_float_no_complex,
empty1=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([1], dtype=config.floatX)),
complex1=(randcomplex(2, 3), randcomplex_nonzero((2, 3))),
complex2=(randcomplex(2, 3), rand_nonzero((2, 3))),
# Inplace on the first element. Must have the same type.
#complex3=(rand(2, 3) ,randcomplex(2, 3)),
)
_good_broadcast_div_mod_normal_float = copymod(
_good_broadcast_div_mod_normal_float_inplace,
empty2=(numpy.asarray([0], dtype=config.floatX),
numpy.asarray([], dtype=config.floatX))
)
_grad_broadcast_div_mod_normal = dict(
same_shapes=(rand(2, 3), rand_nonzero((2, 3))),
scalar=(rand(2, 3), rand_nonzero((1, 1))),
row=(rand(2, 3), rand_nonzero((1, 3))),
column=(rand(2, 3), rand_nonzero((2, 1))),
#complex1=(randcomplex(2, 3), randcomplex_nonzero((2, 3))),
#complex2=(randcomplex(2, 3), rand_nonzero((2, 3))),
#complex3=(rand(2, 3), randcomplex_nonzero((2, 3))),
#dtype_mixup_1=(rand(2, 3), randint_nonzero(2, 3)),
#dtype_mixup_2=(randint_nonzero(2, 3), rand_nonzero((2, 3))),
#empty1=(numpy.asarray([]), numpy.asarray([1.])),
#empty2=(numpy.asarray([0]), numpy.asarray([])),
)
div_grad_rtol = None
if config.floatX == 'float32':
# We raise the relative tolerance for the grad as there can be errors in
# float32.
# This is probably caused by our way of computing the gradient error.
div_grad_rtol = 0.025
def _numpy_true_div(x, y):
"""Performs true division, and cast the result in the type we expect.
We define that function so we can use it in TrueDivTester.expected,
because simply calling numpy.true_divide could cause a dtype mismatch.
"""
out = numpy.true_divide(x, y)
# Use floatX as the result of int / int
if x.dtype in tensor.discrete_dtypes and y.dtype in tensor.discrete_dtypes:
out = theano._asarray(out, dtype=config.floatX)
return out
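# Illustrative behaviour (sketch only, not executed here): int / int gets cast
# to floatX so the reference value matches what Theano's true_div returns.
#   >>> out = _numpy_true_div(numpy.array([1, 2]), numpy.array([4, 4]))
#   >>> str(out.dtype) == config.floatX
#   True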
TrueDivTester = makeBroadcastTester(
op=tensor.true_div,
expected=_numpy_true_div,
good=_good_broadcast_div_mod_normal_float_no_complex,
grad=_grad_broadcast_div_mod_normal,
grad_rtol=div_grad_rtol,
)
TrueDivInplaceTester = makeBroadcastTester(
op=inplace.true_div_inplace,
expected=_numpy_true_div,
good=copymod(
_good_broadcast_div_mod_normal_float_inplace,
# The output is now in float, we cannot work inplace on an int.
without=['integer', 'uinteger', 'int8']),
grad=_grad_broadcast_div_mod_normal,
grad_rtol=div_grad_rtol,
inplace=True)
_good_inv = dict(
normal=[5 * rand_nonzero((2, 3))],
integers=[randint_nonzero(2, 3)],
int8=[numpy.array(list(range(-127, 0)) + list(range(1, 127)), dtype='int8')],
complex=[randcomplex_nonzero((2, 3))],
empty=[numpy.asarray([], dtype=config.floatX)])
_good_inv_inplace = copymod(_good_inv, without=['integers', 'int8', 'complex'])
_grad_inv = copymod(_good_inv,
without=['integers', 'int8', 'complex', 'empty'])
_bad_runtime_inv = dict(
float=[numpy.zeros((2, 3))],
integers=[numpy.zeros((2, 3), dtype='int64')],
int8=[numpy.zeros((2, 3), dtype='int8')],
complex=[numpy.zeros((2, 3), dtype='complex128')])
InvTester = makeBroadcastTester(
op=tensor.inv,
expected=lambda x: upcast_int8_nfunc(numpy.true_divide)(numpy.int8(1), x),
good=_good_inv,
bad_runtime=_bad_runtime_inv,
grad=_grad_inv,
grad_rtol=div_grad_rtol)
InvInplaceTester = makeBroadcastTester(
op=inplace.inv_inplace,
expected=lambda x: _numpy_true_div(numpy.int8(1), x),
good=_good_inv_inplace,
bad_runtime=_bad_runtime_inv,
grad=_grad_inv,
grad_rtol=div_grad_rtol,
inplace=True)
CeilIntDivTester = makeBroadcastTester(
op=tensor.ceil_intdiv,
expected=lambda x, y: check_floatX((x, y), (x // y) + ((x % y) != 0)),
good=_good_broadcast_div_mod_normal_float_no_complex,
name='CeilIntDiv',
# As we implement this function with neq, the gradient returned is always 0.
# grad=_grad_broadcast_div_mod_normal,
# grad_rtol=div_grad_rtol,
)
ModTester = makeBroadcastTester(
op=tensor.mod,
expected=lambda x, y: numpy.asarray(
x % y, dtype=theano.scalar.basic.upcast(x.dtype, y.dtype)),
good=copymod(_good_broadcast_div_mod_normal_float,
['complex1', 'complex2']),
grad=_grad_broadcast_div_mod_normal,
grad_eps=1e-5,
)
ModInplaceTester = makeBroadcastTester(
op=inplace.mod_inplace,
expected=lambda x, y: numpy.asarray(
x % y, dtype=theano.scalar.basic.upcast(x.dtype, y.dtype)),
good=copymod(_good_broadcast_div_mod_normal_float_inplace,
["complex1", "complex2"]),
grad=_grad_broadcast_div_mod_normal,
grad_eps=1e-5,
inplace=True)
_good_broadcast_pow_normal_float = dict(same_shapes=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (2, 3))),
scalar=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (1, 1))),
row=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (1, 3))),
column=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (2, 1))),
dtype_mixup=(rand_ranged(-3, 3, (2, 3)), randint_ranged(-3, 3, (2, 3))),
complex1=(randcomplex(2, 3), randcomplex(2, 3)),
complex2=(randcomplex(2, 3), rand(2, 3)),
# complex3 = (rand(2,3),randcomplex(2,3)), # Inplace on the first element.
empty1=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([1], dtype=config.floatX)),
empty2=(numpy.asarray([0], dtype=config.floatX),
numpy.asarray([], dtype=config.floatX)),
empty3=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([], dtype=config.floatX)),
)
_grad_broadcast_pow_normal = dict(same_shapes=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (2, 3))),
scalar=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (1, 1))),
row=(
rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (1, 3))),
column=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (2, 1))),
#complex1 = (randcomplex(2,3),randcomplex(2,3)),
#complex2 = (randcomplex(2,3),rand(2,3)),
#complex3 = (rand(2,3),randcomplex(2,3)),
#empty1 = (numpy.asarray([]), numpy.asarray([1])),
#empty2 = (numpy.asarray([0]), numpy.asarray([])),
x_eq_zero=(
numpy.asarray([0.], dtype=config.floatX),
numpy.asarray([2.], dtype=config.floatX)
), # Test for issue 1780
)
# empty2 case is not supported by numpy.
_good_broadcast_pow_normal_float_pow = copy(_good_broadcast_pow_normal_float)
del _good_broadcast_pow_normal_float_pow["empty2"]
# Disable NAN checking for pow operator per issue #1780
m = copy(theano.compile.get_default_mode())
m.check_isfinite = False
PowTester = makeBroadcastTester(
op=pow,
expected=lambda x, y: check_floatX((x, y), x ** y),
good=_good_broadcast_pow_normal_float,
grad=_grad_broadcast_pow_normal,
name='Pow',
mode=m
)
PowInplaceTester = makeBroadcastTester(
op=inplace.pow_inplace,
expected=lambda x, y: x ** y,
good=_good_broadcast_pow_normal_float_pow,
grad=_grad_broadcast_pow_normal,
inplace=True,
mode=m
)
# These are corner cases for rounding. There are many rounding algorithms;
# the C round() function and numpy.round do not behave the same!
corner_case = numpy.asarray(
[-2.5, -2., -1.5, -1., -0.5, -.51, -.49, 0,
0.49, 0.5, 0.9, 1, 1.5, 2, 2.5],
dtype=floatX)
# we remove 0 here as the grad is not always computable numerically.
corner_case_grad = numpy.asarray(
[-2.5, -2., -1.5, -1., -0.5, -.51, -.49,
0.49, 0.5, 0.9, 1, 1.5, 2, 2.5],
dtype=floatX)
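# For instance, numpy rounds halfway cases to the nearest even value, while the
# C round() function rounds them away from zero (illustration only):
#   >>> numpy.round(numpy.array([0.5, 1.5, 2.5]))
#   array([ 0., 2., 2.])
# whereas C round(0.5) == 1.0 and round(2.5) == 3.0.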
_good_broadcast_unary_normal_float = dict(
normal=[rand_ranged(-5, 5, (2, 3))],
corner_case=[corner_case],
complex=[randcomplex(2, 3)],
empty=[numpy.asarray([], dtype=config.floatX)])
_good_broadcast_unary_normal_float_no_empty = copymod(
_good_broadcast_unary_normal_float,
without=['empty'])
_good_broadcast_unary_normal_float_no_empty_no_complex = copymod(
_good_broadcast_unary_normal_float_no_empty,
without=['complex'])
_good_broadcast_unary_normal_float_no_complex = copymod(
_good_broadcast_unary_normal_float,
without=['complex'])
_good_broadcast_unary_normal_float_no_complex_small_neg_range = dict(
normal=[rand_ranged(-2, 5, (2, 3))],
corner_case=[corner_case],
empty=[numpy.asarray([], dtype=config.floatX)])
_good_broadcast_unary_normal = dict(
normal=[numpy.asarray(rand_ranged(-5, 5, (2, 3)),
dtype=config.floatX)],
integers=[randint_ranged(-5, 5, (2, 3))],
# not using -128 because numpy.allclose would return False
int8=[numpy.arange(-127, 128, dtype='int8')],
corner_case=[corner_case],
complex=[randcomplex(2, 3)],
empty=[numpy.asarray([], dtype=config.floatX)],
)
_good_broadcast_unary_normal_no_complex = dict(
normal=[numpy.asarray(rand_ranged(-5, 5, (2, 3)), dtype=floatX)],
integers=[randint_ranged(-5, 5, (2, 3))],
int8=[numpy.arange(-127, 128, dtype='int8')],
corner_case=[corner_case],
empty=[numpy.asarray([], dtype=config.floatX)],
)
_grad_broadcast_unary_normal_no_complex = dict(
normal=[numpy.asarray(rand_ranged(-5, 5, (2, 3)), dtype=floatX)],
corner_case=[corner_case_grad])
_grad_broadcast_unary_normal = dict(
normal=[numpy.asarray(rand_ranged(-5, 5, (2, 3)), dtype=floatX)],
corner_case=[corner_case_grad],
# empty = [numpy.asarray([])] # XXX: should this be included?
)
_grad_broadcast_unary_normal_small_neg_range = dict(
normal=[numpy.asarray(rand_ranged(-2, 5, (2, 3)), dtype=floatX)],
corner_case=[corner_case_grad])
_grad_broadcast_unary_normal_no_complex_no_corner_case = copymod(
_grad_broadcast_unary_normal_no_complex,
without=['corner_case'])
_grad_broadcast_unary_abs1_no_complex = dict(
normal=[numpy.asarray(rand_ranged(-1, 1, (2, 3)), dtype=floatX)],
)
_grad_broadcast_unary_0_2_no_complex = dict(
# Don't go too close to 2 for tests in float32
normal=[numpy.asarray(rand_ranged(0, 1.9, (2, 3)), dtype=floatX)],
)
# Inplace ops where the input is an integer and the output is a float*
# don't have well-defined behavior. We don't test that case.
AbsTester = makeBroadcastTester(op=tensor.abs_,
expected=lambda x: abs(x),
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal)
_good_broadcast_unary_normal_abs = copy(_good_broadcast_unary_normal)
# Can't do inplace on Abs as the input/output are not of the same type!
del _good_broadcast_unary_normal_abs['complex']
AbsInplaceTester = makeBroadcastTester(op=inplace.abs__inplace,
expected=lambda x: numpy.abs(x),
good=_good_broadcast_unary_normal_abs,
grad=_grad_broadcast_unary_normal,
inplace=True)
NegTester = makeBroadcastTester(op=tensor.neg,
expected=lambda x: -x,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal)
NegInplaceTester = makeBroadcastTester(op=inplace.neg_inplace,
expected=lambda x: -x,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
inplace=True)
SgnTester = makeBroadcastTester(op=tensor.sgn,
expected=numpy.sign,
good=_good_broadcast_unary_normal_no_complex,
grad=_grad_broadcast_unary_normal,)
SgnInplaceTester = makeBroadcastTester(op=inplace.sgn_inplace,
expected=numpy.sign,
good=_good_broadcast_unary_normal_no_complex,
grad=_grad_broadcast_unary_normal,
inplace=True)
IntDivTester = makeBroadcastTester(
op=tensor.int_div,
expected=lambda x, y: check_floatX((x, y), x // y),
good=_good_broadcast_div_mod_normal_float,
# I don't test the grad as the output is always an integer
# (this is not a continuous output).
# grad=_grad_broadcast_div_mod_normal,
)
IntDivInplaceTester = makeBroadcastTester(
op=inplace.int_div_inplace,
expected=lambda x, y: check_floatX((x, y), x // y),
good=_good_broadcast_div_mod_normal_float_inplace,
# I don't test the grad as the output is always an integer
# (this is not a continuous output).
# grad=_grad_broadcast_div_mod_normal,
inplace=True
)
CeilTester = makeBroadcastTester(op=tensor.ceil,
expected=lambda a: numpy.asarray(
numpy.ceil(a),
a.dtype),
good=_good_broadcast_unary_normal_no_complex,
grad=copymod(_grad_broadcast_unary_normal,
without=['corner_case'],
# corner_case includes ints where ceil is not differentiable
extra=[numpy.asarray([-2.5, -1.5, -1.51, 0.49, .98, 1.02],
dtype=floatX)]))
CeilInplaceTester = makeBroadcastTester(op=inplace.ceil_inplace,
expected=lambda a: numpy.asarray(numpy.ceil(a), a.dtype),
good=_good_broadcast_unary_normal_no_complex,
# corner cases includes a lot of integers: points where Ceil is not
# continuous (not differentiable)
grad=copymod(_grad_broadcast_unary_normal,
without=['corner_case'],
# corner_case includes ints where ceil is not differentiable
extra=[numpy.asarray([-2.5, -1.5, -1.51, 0.49, .98, 1.02],
dtype=floatX)]),
inplace=True)
FloorTester = makeBroadcastTester(op=tensor.floor,
expected=lambda a: numpy.asarray(numpy.floor(a), a.dtype),
good=_good_broadcast_unary_normal_no_complex,
# XXX: why does grad of floor not give huge values at
# the integer points in the 'corner_case' in
# _grad_broadcast_unary_normal? It seems this test should fail,
# yet it does not...
grad=_grad_broadcast_unary_normal)
FloorInplaceTester = makeBroadcastTester(op=inplace.floor_inplace,
expected=lambda a: numpy.asarray(numpy.floor(a), a.dtype),
good=_good_broadcast_unary_normal_no_complex,
grad=_grad_broadcast_unary_normal,
inplace=True)
TruncInplaceTester = makeBroadcastTester(
op=inplace.trunc_inplace,
expected=lambda a: numpy.asarray(numpy.trunc(a), a.dtype),
good=_good_broadcast_unary_normal_no_complex,
inplace=True)
TruncTester = makeBroadcastTester(
op=tensor.trunc,
expected=lambda a: numpy.asarray(numpy.trunc(a), a.dtype),
good=_good_broadcast_unary_normal_no_complex)
RoundHalfToEvenTester = makeBroadcastTester(
op=tensor.round_half_to_even,
expected=numpy.round,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal_no_complex_no_corner_case)
RoundHalfToEvenInplaceTester = makeBroadcastTester(
op=inplace.round_half_to_even_inplace,
expected=numpy.round,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal_no_complex_no_corner_case,
inplace=True)
# numpy.vectorize doesn't handle empty ndarrays correctly.
# See numpy/lib/function_base.py, in class vectorize.__call__.
# This happens in float32 mode.
RoundHalfAwayFromZeroTester = makeBroadcastTester(
op=tensor.round_half_away_from_zero,
expected=lambda a: theano.scalar.basic.round_half_away_from_zero_vec(a),
good=_good_broadcast_unary_normal_float_no_empty_no_complex,
grad=_grad_broadcast_unary_normal_no_complex_no_corner_case)
#_good_broadcast_unary_normal_float)
RoundHalfAwayFromZeroInplaceTester = makeBroadcastTester(
op=inplace.round_half_away_from_zero_inplace,
expected=lambda a: theano.scalar.basic.round_half_away_from_zero_vec(a),
good=_good_broadcast_unary_normal_float_no_empty_no_complex,
grad=_grad_broadcast_unary_normal_no_complex_no_corner_case,
inplace=True)
SqrTester = makeBroadcastTester(op=tensor.sqr,
expected=numpy.square,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal)
SqrInplaceTester = makeBroadcastTester(op=inplace.sqr_inplace,
expected=numpy.square,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
inplace=True)
ExpTester = makeBroadcastTester(
op=tensor.exp,
expected=upcast_float16_ufunc(numpy.exp),
good=dict(_good_broadcast_unary_normal,
int8=[numpy.arange(-127, 89, dtype='int8')]),
grad=_grad_broadcast_unary_normal)
ExpInplaceTester = makeBroadcastTester(
op=inplace.exp_inplace,
expected=numpy.exp,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
Exp2Tester = makeBroadcastTester(op=tensor.exp2,
expected=upcast_float16_ufunc(numpy.exp2),
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal)
Exp2InplaceTester = makeBroadcastTester(
op=inplace.exp2_inplace,
expected=numpy.exp2,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
Expm1Tester = makeBroadcastTester(
op=tensor.expm1,
expected=upcast_float16_ufunc(numpy.expm1),
good=dict(_good_broadcast_unary_normal,
int8=[numpy.arange(-127, 89, dtype='int8')]),
grad=_grad_broadcast_unary_normal)
Expm1InplaceTester = makeBroadcastTester(
op=inplace.expm1_inplace,
expected=numpy.expm1,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
_good_broadcast_unary_positive = dict(
normal=(rand_ranged(0.001, 5, (2, 3)),),
integers=(randint_ranged(1, 5, (2, 3)),),
uint8=[numpy.arange(1, 256, dtype='uint8')],
complex=(randc128_ranged(1, 5, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),
)
_good_broadcast_unary_positive_float = copymod(
_good_broadcast_unary_positive,
without=['integers', 'uint8'])
_grad_broadcast_unary_positive = dict(normal=(rand_ranged(0.001, 5, (2, 3)),),)
LogTester = makeBroadcastTester(op=tensor.log,
expected=upcast_float16_ufunc(numpy.log),
good=_good_broadcast_unary_positive,
grad=_grad_broadcast_unary_positive)
LogInplaceTester = makeBroadcastTester(
op=inplace.log_inplace,
expected=numpy.log,
good=_good_broadcast_unary_positive_float,
grad=_grad_broadcast_unary_positive,
inplace=True)
Log2Tester = makeBroadcastTester(op=tensor.log2,
expected=upcast_float16_ufunc(numpy.log2),
good=_good_broadcast_unary_positive,
grad=_grad_broadcast_unary_positive)
Log2InplaceTester = makeBroadcastTester(
op=inplace.log2_inplace,
expected=numpy.log2,
good=_good_broadcast_unary_positive_float,
grad=_grad_broadcast_unary_positive,
inplace=True)
Log10Tester = makeBroadcastTester(op=tensor.log10,
expected=upcast_float16_ufunc(numpy.log10),
good=_good_broadcast_unary_positive,
grad=_grad_broadcast_unary_positive)
Log10InplaceTester = makeBroadcastTester(
op=inplace.log10_inplace,
expected=numpy.log10,
good=_good_broadcast_unary_positive_float,
grad=_grad_broadcast_unary_positive,
inplace=True)
Log1pTester = makeBroadcastTester(op=tensor.log1p,
expected=upcast_float16_ufunc(numpy.log1p),
good=_good_broadcast_unary_positive,
grad=_grad_broadcast_unary_positive)
Log1pInplaceTester = makeBroadcastTester(
op=inplace.log1p_inplace,
expected=numpy.log1p,
good=_good_broadcast_unary_positive_float,
grad=_grad_broadcast_unary_positive,
inplace=True)
SqrtTester = makeBroadcastTester(op=tensor.sqrt,
expected=upcast_float16_ufunc(numpy.sqrt),
good=_good_broadcast_unary_positive,
grad=_grad_broadcast_unary_positive)
SqrtInplaceTester = makeBroadcastTester(
op=inplace.sqrt_inplace,
expected=numpy.sqrt,
good=_good_broadcast_unary_positive_float,
grad=_grad_broadcast_unary_positive,
inplace=True)
_good_broadcast_unary_wide = dict(
normal=(rand_ranged(-1000, 1000, (2, 3)),),
integers=(randint_ranged(-1000, 1000, (2, 3)),),
int8=[numpy.arange(-127, 128, dtype='int8')],
complex=(randc128_ranged(-1000, 1000, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
_good_broadcast_unary_wide_float = copymod(
_good_broadcast_unary_wide,
without=['integers', 'int8'])
_grad_broadcast_unary_wide = dict(normal=(rand_ranged(-1000, 1000, (2, 3)),),)
if theano.config.floatX == 'float32':
angle_eps = 1e-4
else:
angle_eps = 1e-10
Deg2radTester = makeBroadcastTester(
op=tensor.deg2rad,
expected=upcast_float16_ufunc(numpy.deg2rad),
good=_good_broadcast_unary_normal_no_complex,
grad=_grad_broadcast_unary_normal_no_complex,
eps=angle_eps)
Deg2radInplaceTester = makeBroadcastTester(
op=inplace.deg2rad_inplace,
expected=numpy.deg2rad,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal_no_complex,
inplace=True,
eps=angle_eps)
Rad2degTester = makeBroadcastTester(
op=tensor.rad2deg,
expected=upcast_float16_ufunc(numpy.rad2deg),
good=_good_broadcast_unary_normal_no_complex,
grad=_grad_broadcast_unary_normal_no_complex,
eps=angle_eps)
Rad2degInplaceTester = makeBroadcastTester(
op=inplace.rad2deg_inplace,
expected=numpy.rad2deg,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal_no_complex,
inplace=True,
eps=angle_eps)
SinTester = makeBroadcastTester(op=tensor.sin,
expected=upcast_float16_ufunc(numpy.sin),
good=_good_broadcast_unary_wide,
grad=_grad_broadcast_unary_wide)
SinInplaceTester = makeBroadcastTester(
op=inplace.sin_inplace,
expected=numpy.sin,
good=_good_broadcast_unary_wide_float,
grad=_grad_broadcast_unary_wide,
inplace=True)
_good_broadcast_unary_arcsin = dict(
normal=(rand_ranged(-1, 1, (2, 3)),),
integers=(randint_ranged(-1, 1, (2, 3)),),
int8=[numpy.arange(-1, 2, dtype='int8')],
complex=(randc128_ranged(-1, 1, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
_good_broadcast_unary_arcsin_float = copymod(
_good_broadcast_unary_arcsin,
without=['integers', 'int8'])
# The actual range is [-1, 1] but the numerical gradient is too
# unstable near those values
_grad_broadcast_unary_arcsin = dict(normal=(rand_ranged(-0.9, 0.9, (2, 3)),),)
ArcsinTester = makeBroadcastTester(op=tensor.arcsin,
expected=upcast_float16_ufunc(numpy.arcsin),
good=_good_broadcast_unary_arcsin,
grad=_grad_broadcast_unary_arcsin)
ArcsinInplaceTester = makeBroadcastTester(
op=inplace.arcsin_inplace,
expected=numpy.arcsin,
good=_good_broadcast_unary_arcsin_float,
grad=_grad_broadcast_unary_arcsin,
inplace=True)
CosTester = makeBroadcastTester(op=tensor.cos,
expected=upcast_float16_ufunc(numpy.cos),
good=_good_broadcast_unary_wide,
grad=_grad_broadcast_unary_wide)
CosInplaceTester = makeBroadcastTester(
op=inplace.cos_inplace,
expected=numpy.cos,
good=_good_broadcast_unary_wide_float,
grad=_grad_broadcast_unary_wide,
inplace=True)
def test_py_c_match():
a = tensor.TensorType(dtype='int8', broadcastable=(False,))()
f = theano.function([a], tensor.arccos(a), mode='DebugMode')
# This can fail in DebugMode
f(numpy.asarray([1, 0, -1], dtype='int8'))
ArccosTester = makeBroadcastTester(op=tensor.arccos,
expected=upcast_float16_ufunc(numpy.arccos),
good=_good_broadcast_unary_arcsin,
grad=_grad_broadcast_unary_arcsin)
ArccosInplaceTester = makeBroadcastTester(
op=inplace.arccos_inplace,
expected=numpy.arccos,
good=_good_broadcast_unary_arcsin_float,
grad=_grad_broadcast_unary_arcsin,
inplace=True)
_good_broadcast_unary_tan = dict(
normal=(rand_ranged(-3.14, 3.14, (2, 3)),),
shifted=(rand_ranged(3.15, 6.28, (2, 3)),),
integers=(randint_ranged(-3, 3, (2, 3)),),
int8=[numpy.arange(-3, 4, dtype='int8')],
complex=(randc128_ranged(-3.14, 3.14, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
# We do not want to test around the discontinuity.
_grad_broadcast_unary_tan = dict(normal=(rand_ranged(-1.5, 1.5, (2, 3)),),
shifted=(rand_ranged(1.6, 4.6, (2, 3)),))
TanTester = makeBroadcastTester(op=tensor.tan,
expected=upcast_float16_ufunc(numpy.tan),
good=_good_broadcast_unary_tan,
grad=_grad_broadcast_unary_tan)
TanInplaceTester = makeBroadcastTester(
op=inplace.tan_inplace,
expected=numpy.tan,
good=copymod(_good_broadcast_unary_tan, without=['integers', 'int8']),
grad=_grad_broadcast_unary_tan,
inplace=True)
ArctanTester = makeBroadcastTester(op=tensor.arctan,
expected=upcast_float16_ufunc(numpy.arctan),
good=_good_broadcast_unary_wide,
grad=_grad_broadcast_unary_wide)
ArctanInplaceTester = makeBroadcastTester(
op=inplace.arctan_inplace,
expected=numpy.arctan,
good=_good_broadcast_unary_wide_float,
grad=_grad_broadcast_unary_wide,
inplace=True)
_good_broadcast_binary_arctan2 = dict(
same_shapes=(rand(2, 3), rand(2, 3)),
not_same_dimensions=(rand(2, 2), rand(2)),
scalar=(rand(2, 3), rand(1, 1)),
row=(rand(2, 3), rand(1, 3)),
column=(rand(2, 3), rand(2, 1)),
integers=(randint(2, 3), randint(2, 3)),
int8=[numpy.arange(-127, 128, dtype='int8'),
numpy.arange(-127, 128, dtype='int8')[:, numpy.newaxis]],
dtype_mixup_1=(rand(2, 3), randint(2, 3)),
dtype_mixup_2=(randint(2, 3), rand(2, 3)),
empty=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([1], dtype=config.floatX)),
)
_grad_broadcast_binary_arctan2 = dict(
same_shapes=(rand(2, 3), rand(2, 3)),
scalar=(rand(2, 3), rand(1, 1)),
row=(rand(2, 3), rand(1, 3)),
column=(rand(2, 3), rand(2, 1)),
)
Arctan2Tester = makeBroadcastTester(
op=tensor.arctan2,
expected=upcast_float16_ufunc(numpy.arctan2),
good=_good_broadcast_binary_arctan2,
grad=_grad_broadcast_binary_arctan2)
Arctan2InplaceTester = makeBroadcastTester(
op=inplace.arctan2_inplace,
expected=numpy.arctan2,
good=copymod(_good_broadcast_binary_arctan2, without=['integers', 'int8']),
grad=_grad_broadcast_binary_arctan2,
inplace=True)
CoshTester = makeBroadcastTester(
op=tensor.cosh,
expected=upcast_float16_ufunc(numpy.cosh),
good=dict(_good_broadcast_unary_normal,
int8=[numpy.arange(-89, 90, dtype='int8')]),
grad=_grad_broadcast_unary_normal)
CoshInplaceTester = makeBroadcastTester(
op=inplace.cosh_inplace,
expected=numpy.cosh,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
_good_broadcast_unary_arccosh = dict(
normal=(rand_ranged(1, 1000, (2, 3)),),
integers=(randint_ranged(1, 1000, (2, 3)),),
uint8=[numpy.arange(1, 256, dtype='uint8')],
complex=(randc128_ranged(1, 1000, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
_grad_broadcast_unary_arccosh = dict(normal=(rand_ranged(1, 1000, (2, 3)),),)
ArccoshTester = makeBroadcastTester(
op=tensor.arccosh,
expected=upcast_float16_ufunc(numpy.arccosh),
good=_good_broadcast_unary_arccosh,
grad=_grad_broadcast_unary_arccosh)
ArccoshInplaceTester = makeBroadcastTester(
op=inplace.arccosh_inplace,
expected=numpy.arccosh,
good=copymod(_good_broadcast_unary_arccosh, without=['integers', 'uint8']),
grad=_grad_broadcast_unary_arccosh,
inplace=True)
SinhTester = makeBroadcastTester(
op=tensor.sinh,
expected=upcast_float16_ufunc(numpy.sinh),
good=dict(_good_broadcast_unary_normal,
int8=[numpy.arange(-89, 90, dtype='int8')]),
grad=_grad_broadcast_unary_normal)
SinhInplaceTester = makeBroadcastTester(
op=inplace.sinh_inplace,
expected=numpy.sinh,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
ArcsinhTester = makeBroadcastTester(
op=tensor.arcsinh,
expected=upcast_float16_ufunc(numpy.arcsinh),
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal)
ArcsinhInplaceTester = makeBroadcastTester(
op=inplace.arcsinh_inplace,
expected=numpy.arcsinh,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
TanhTester = makeBroadcastTester(op=tensor.tanh,
expected=upcast_float16_ufunc(numpy.tanh),
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal)
TanhInplaceTester = makeBroadcastTester(
op=inplace.tanh_inplace,
expected=numpy.tanh,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
inplace=True)
_eps = 1e-2
_good_broadcast_unary_arctanh = dict(
normal=(rand_ranged(-1 + _eps, 1 - _eps, (2, 3)),),
integers=(randint_ranged(-1 + _eps, 1 - _eps, (2, 3)),),
int8=[numpy.arange(0, 1, dtype='int8')],
complex=(randc128_ranged(-1 + _eps, 1 - _eps, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
_grad_broadcast_unary_arctanh = dict(
normal=(rand_ranged(-1 + _eps, 1 - _eps, (2, 3)),),)
ArctanhTester = makeBroadcastTester(
op=tensor.arctanh,
expected=upcast_float16_ufunc(numpy.arctanh),
good=_good_broadcast_unary_arctanh,
grad=_grad_broadcast_unary_arctanh)
ArctanhInplaceTester = makeBroadcastTester(
op=inplace.arctanh_inplace,
expected=numpy.arctanh,
good=copymod(_good_broadcast_unary_arctanh, without=['integers', 'int8']),
grad=_grad_broadcast_unary_arctanh,
inplace=True)
# We can't test these if scipy is not installed!
# Precomputing the result is brittle (it has been broken before!):
# if we made any modification to the random numbers here,
# the input random numbers would change, and so would the output!
if imported_scipy_special:
expected_erf = scipy.special.erf
expected_erfc = scipy.special.erfc
expected_erfinv = scipy.special.erfinv
expected_erfcinv = scipy.special.erfcinv
expected_gamma = scipy.special.gamma
expected_gammaln = scipy.special.gammaln
expected_psi = scipy.special.psi
expected_chi2sf = lambda x, df: scipy.stats.chi2.sf(x, df).astype(x.dtype)
expected_j0 = scipy.special.j0
expected_j1 = scipy.special.j1
skip_scipy = False
if LooseVersion(scipy_version) >= LooseVersion("0.12.0"):
expected_erfcx = scipy.special.erfcx
skip_scipy12 = False
else:
expected_erfcx = []
skip_scipy12 = "the erfcx op requires scipy version >= 0.12, installed version is " + scipy_version
else:
expected_erf = []
expected_erfc = []
expected_erfcx = []
expected_erfinv = []
expected_erfcinv = []
expected_gamma = []
expected_gammaln = []
expected_psi = []
expected_chi2sf = []
expected_j0 = []
expected_j1 = []
skip_scipy = "scipy is not present"
skip_scipy12 = "scipy is not present"
ErfTester = makeBroadcastTester(
op=tensor.erf,
expected=expected_erf,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
ErfInplaceTester = makeBroadcastTester(
op=inplace.erf_inplace,
expected=expected_erf,
good=_good_broadcast_unary_normal_float,
grad=_grad_broadcast_unary_normal,
mode=mode_no_scipy,
eps=2e-10,
inplace=True,
skip=skip_scipy)
ErfcTester = makeBroadcastTester(
op=tensor.erfc,
expected=expected_erfc,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
ErfcInplaceTester = makeBroadcastTester(
op=inplace.erfc_inplace,
expected=expected_erfc,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy)
ErfcxTester = makeBroadcastTester(
op=tensor.erfcx,
expected=expected_erfcx,
good=_good_broadcast_unary_normal_float_no_complex_small_neg_range,
grad=_grad_broadcast_unary_normal_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy12)
ErfcxInplaceTester = makeBroadcastTester(
op=inplace.erfcx_inplace,
expected=expected_erfcx,
good=_good_broadcast_unary_normal_float_no_complex_small_neg_range,
grad=_grad_broadcast_unary_normal_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy12)
ErfinvTester = makeBroadcastTester(
op=tensor.erfinv,
expected=expected_erfinv,
good={'normal': [rand_ranged(-.9, .9, (2, 3))],
'empty': [numpy.asarray([], dtype=config.floatX)]},
grad=_grad_broadcast_unary_abs1_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
ErfcinvTester = makeBroadcastTester(
op=tensor.erfcinv,
expected=expected_erfcinv,
good={'normal': [rand_ranged(0.001, 1.9, (2, 3))],
'empty': [numpy.asarray([], dtype=config.floatX)]},
grad=_grad_broadcast_unary_0_2_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
_good_broadcast_unary_gammaln = dict(
normal=(rand_ranged(-1 + 1e-2, 10, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
_grad_broadcast_unary_gammaln = dict(
# smaller range as our grad method does not estimate it well enough.
normal=(rand_ranged(1e-1, 8, (2, 3)),),)
GammaTester = makeBroadcastTester(
op=tensor.gamma,
expected=expected_gamma,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
skip=skip_scipy)
GammaInplaceTester = makeBroadcastTester(
op=inplace.gamma_inplace,
expected=expected_gamma,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
inplace=True,
skip=skip_scipy)
GammalnTester = makeBroadcastTester(
op=tensor.gammaln,
expected=expected_gammaln,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
GammalnInplaceTester = makeBroadcastTester(
op=inplace.gammaln_inplace,
expected=expected_gammaln,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy)
_good_broadcast_unary_psi = dict(
normal=(rand_ranged(1, 10, (2, 3)),),
empty=(numpy.asarray([], dtype=config.floatX),),)
PsiTester = makeBroadcastTester(
op=tensor.psi,
expected=expected_psi,
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
PsiInplaceTester = makeBroadcastTester(
op=inplace.psi_inplace,
expected=expected_psi,
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy)
# chi2sf takes two inputs, a value (x) and the degrees of freedom (k).
# It is not obvious how to deal with that here...
_good_broadcast_unary_chi2sf = dict(
normal=(rand_ranged(1, 10, (2, 3)), numpy.asarray(1, dtype=config.floatX)),
empty=(numpy.asarray([], dtype=config.floatX),
numpy.asarray(1, dtype=config.floatX)))
Chi2SFTester = makeBroadcastTester(
op=tensor.chi2sf,
expected=expected_chi2sf,
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
name='Chi2SF')
Chi2SFInplaceTester = makeBroadcastTester(
op=inplace.chi2sf_inplace,
expected=expected_chi2sf,
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
name='Chi2SF')
_good_broadcast_unary_j = dict(
normal=(rand_ranged(0.1, 8, (2, 3)),),)
J0Tester = makeBroadcastTester(
op=tensor.j0,
expected=expected_j0,
good=_good_broadcast_unary_j,
grad=_good_broadcast_unary_j,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
J0InplaceTester = makeBroadcastTester(
op=inplace.j0_inplace,
expected=expected_j0,
good=_good_broadcast_unary_j,
grad=_good_broadcast_unary_j,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy)
J1Tester = makeBroadcastTester(
op=tensor.j1,
expected=expected_j1,
good=_good_broadcast_unary_j,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy)
J1InplaceTester = makeBroadcastTester(
op=inplace.j1_inplace,
expected=expected_j1,
good=_good_broadcast_unary_j,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy)
ZerosLikeTester = makeBroadcastTester(
op=tensor.zeros_like,
expected=numpy.zeros_like,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
name='ZerosLike')
OnesLikeTester = makeBroadcastTester(
op=tensor.ones_like,
expected=numpy.ones_like,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
name='OnesLike')
# Complex operations
_good_complex_from_polar = dict(
same_shapes=(abs(rand(2, 3)), rand(2, 3)),
not_same_dimensions=(abs(rand(2, 2)), rand(2)),
scalar=(abs(rand(2, 3)), rand(1, 1)),
row=(abs(rand(2, 3)), rand(1, 3)),
column=(abs(rand(2, 3)), rand(2, 1)),
integers=(abs(randint(2, 3)), randint(2, 3)),
empty=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([1], dtype=config.floatX)),)
_grad_complex_from_polar = dict(
same_shapes=(abs(rand(2, 3)), rand(2, 3)),
scalar=(abs(rand(2, 3)), rand(1, 1)),
row=(abs(rand(2, 3)), rand(1, 3)),
column=(abs(rand(2, 3)), rand(2, 1)))
ComplexFromPolarTester = makeBroadcastTester(
op=tensor.complex_from_polar,
expected=lambda r, theta: r * numpy.cos(theta) + 1j * r * numpy.sin(theta),
good=_good_complex_from_polar)
ConjTester = makeBroadcastTester(
op=tensor.conj,
expected=numpy.conj,
good=_good_broadcast_unary_normal)
ConjInplaceTester = makeBroadcastTester(
op=inplace.conj_inplace,
expected=numpy.conj,
good=_good_broadcast_unary_normal,
inplace=True)
DotTester = makeTester(name='DotTester',
op=dot,
expected=lambda x, y: numpy.dot(x, y),
checks={},
good=dict(correct1=(rand(5, 7), rand(7, 5)),
correct2=(rand(5, 7), rand(7, 9)),
correct3=(rand(5, 7), rand(7)),
correct4=(rand(5), rand(5, 7)),
mixed1=(rand(5).astype('float32'),
rand(5, 7)),
mixed2=(rand(5).astype('float64'),
rand(5, 7)),
complex1=(randcomplex(5, 7),
randcomplex(7)),
complex2=(rand(5, 7), randcomplex(7)),
complex3=(randcomplex(5, 7), rand(7)),
empty1=(numpy.asarray([], dtype=config.floatX),
numpy.asarray([], dtype=config.floatX)),
empty2=(rand(5, 0), rand(0, 2)),
empty3=(rand(0, 5), rand(5, 0)),
),
bad_build=dict(),
bad_runtime=dict(bad1=(rand(5, 7), rand(5, 7)),
bad2=(rand(5, 7), rand(8, 3))))
BatchedDotTester = makeTester(
name='BatchedDotTester',
op=batched_dot,
expected=(lambda xs, ys:
numpy.asarray(
list(x * y if x.ndim == 0 or y.ndim == 0 else numpy.dot(x, y)
for x, y in zip(xs, ys)),
dtype=theano.scalar.upcast(xs.dtype, ys.dtype))),
checks={},
grad=dict(correct1=(rand(3, 5, 7), rand(3, 7, 5)),
correct2=(rand(3, 5, 7), rand(3, 7, 9)),
correct3=(rand(3, 5, 7), rand(3, 7)),
correct4=(rand(3, 5), rand(3, 5, 7)),
correct5=(rand(3), rand(3, 5, 7)),
correct6=(rand(3, 5), rand(3)),
correct7=(rand(3, 5), rand(3, 5)),
correct8=(rand(3), rand(3)),
correct9=(rand(3, 5, 7, 11), rand(3)),
correct10=(rand(3, 7, 11, 5), rand(3, 5)),
correct11=(rand(3, 7, 11, 5), rand(3, 5, 13)),
correct12=(rand(3, 7, 11, 5), rand(3, 13, 5, 17)),
mixed1=(rand(3, 5).astype('float32'),
rand(3, 5, 7)),
mixed2=(rand(3, 5).astype('float64'),
rand(3, 5, 7))),
good=dict(correct1=(rand(3, 5, 7), rand(3, 7, 5)),
correct2=(rand(3, 5, 7), rand(3, 7, 9)),
correct3=(rand(3, 5, 7), rand(3, 7)),
correct4=(rand(3, 5), rand(3, 5, 7)),
correct5=(rand(3), rand(3, 5, 7)),
correct6=(rand(3, 5), rand(3)),
correct7=(rand(3, 5), rand(3, 5)),
correct8=(rand(3), rand(3)),
correct9=(rand(3, 5, 7, 11), rand(3)),
correct10=(rand(3, 7, 11, 5), rand(3, 5)),
correct11=(rand(3, 7, 11, 5), rand(3, 5, 13)),
correct12=(rand(3, 7, 11, 5), rand(3, 13, 5, 17)),
mixed1=(rand(3, 5).astype('float32'),
rand(3, 5, 7)),
mixed2=(rand(3, 5).astype('float64'),
rand(3, 5, 7))),
bad_build=dict(no_batch_axis2=(rand(), rand(3, 5)),
no_batch_axis3=(rand(3, 5), rand())),
bad_runtime=dict(batch_dim_mismatch1=(rand(2, 5, 7), rand(3, 7, 9)),
batch_dim_mismatch2=(rand(3, 5, 7), rand(2, 7, 9)),
batch_dim_mismatch3=(rand(3), rand(5)),
bad_dim1=(rand(3, 5, 7), rand(3, 5, 7)),
bad_dim2=(rand(3, 5, 7), rand(3, 8, 3)),
bad_dim3=(rand(3, 5), rand(3, 7)),
bad_dim4=(rand(3, 5, 7, 11), rand(3, 5)),
bad_dim5=(rand(3, 5, 7, 11), rand(3, 5, 13)),
bad_dim6=(rand(3, 5, 7, 11), rand(3, 13, 5, 17))))
def _numpy_second(x, y):
return numpy.broadcast_arrays(x, y)[1]
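# Note (added): `second(x, y)` returns y broadcast up to the broadcast shape
# of x and y, which the helper above mimics with numpy.broadcast_arrays.
# Illustrative sketch (comment only, not executed by the test suite):
#   _numpy_second(numpy.zeros((2, 3)), numpy.arange(3))
#   # -> array([[0, 1, 2],
#   #           [0, 1, 2]])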
ALL_DTYPES = ('int8', 'int16', 'int32', 'int64',
'float32', 'float64', 'complex64', 'complex128')
REAL_DTYPES = ALL_DTYPES[:-2]
COMPLEX_DTYPES = ALL_DTYPES[-2:]
def multi_dtype_checks(shape1, shape2, dtypes=ALL_DTYPES, nameprefix=''):
for dtype1, dtype2 in itertools.combinations(dtypes, 2):
name1 = '%s_%s_%s' % (nameprefix, dtype1, dtype2)
name2 = '%s_%s_%s' % (nameprefix, dtype2, dtype1)
obj1 = rand_of_dtype(shape1, dtype1)
obj2 = rand_of_dtype(shape2, dtype2)
yield (name1, (obj1, obj2))
yield (name2, (obj2, obj1))
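# For example (hypothetical parameters, added purely for illustration):
#   list(multi_dtype_checks((2,), (3,), dtypes=('int8', 'float32'),
#                           nameprefix='ex'))
# yields ('ex_int8_float32', (int8 array of shape (2,), float32 array of
# shape (3,))) and the swapped ('ex_float32_int8', ...), so every ordered
# dtype pair appears as a named test case.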
def multi_dtype_cast_checks(shape, dtypes=ALL_DTYPES, nameprefix=''):
for dtype1, dtype2 in itertools.combinations(dtypes, 2):
name1 = '%s_%s_%s' % (nameprefix, dtype1, dtype2)
name2 = '%s_%s_%s' % (nameprefix, dtype2, dtype1)
obj1 = rand_of_dtype(shape, dtype1)
obj2 = rand_of_dtype(shape, dtype2)
yield (name1, (obj1, dtype2))
yield (name2, (obj2, dtype1))
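# Unlike multi_dtype_checks, each case yielded here pairs an array with a
# *target dtype string*, e.g. ('ex_int8_float32', (int8 array, 'float32')),
# which is the form expected by cast-style tests such as CastTester below.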
SecondBroadcastTester = makeTester(
name='SecondBroadcastTester',
op=second,
expected=_numpy_second,
good=dict(itertools.chain(
multi_dtype_checks((4, 5), (5,)),
multi_dtype_checks((2, 3, 2), (3, 2)),
multi_dtype_checks((2, 3, 2), (2,)),
)),
# I can't think of any way to make this fail at
# build time
# Just some simple smoke tests
bad_runtime=dict(
fail1=(rand(5, 4), rand(5)),
fail2=(rand(3, 2, 3), rand(6, 9)),
fail3=(randint(6, 2, 9), rand(3, 2)),
)
)
# We exclude local_fill_to_alloc because it optimizes the "second" node
# away from the graph.
SecondSameRankTester = makeTester(
name='SecondSameRankTester',
op=second,
expected=_numpy_second,
good=dict(itertools.chain(
multi_dtype_checks((4, 5), (4, 5)),
multi_dtype_checks((1, 2), (3, 2)),
multi_dtype_checks((3, 2), (1, 2)),
)),
# These sizes are not broadcastable to one another
# and SHOULD raise an error, but currently don't.
bad_runtime=dict(itertools.chain(
multi_dtype_checks((4, 5), (5, 4)),
multi_dtype_checks((1, 5), (5, 4)),
)),
mode=get_default_mode().excluding(
'local_fill_to_alloc',
'local_useless_fill')
)
# Alloc
AllocTester = makeBroadcastTester(
name='AllocTester',
op=alloc,
expected=(lambda x, *shp: numpy.zeros(shp, dtype=x.dtype) + x),
good=dict(
correct01=(rand(), numpy.int32(7)),
correct01_bcast=(rand(1), numpy.int32(7)),
correct02=(rand(), numpy.int32(4), numpy.int32(7)),
correct12=(rand(7), numpy.int32(4), numpy.int32(7)),
correct13=(rand(7), numpy.int32(2), numpy.int32(4),
numpy.int32(7)),
correct23=(rand(4, 7), numpy.int32(2), numpy.int32(4),
numpy.int32(7)),
correctb1=(rand(1, 7), numpy.int32(4), numpy.int32(7)),
correctb2=(rand(1, 7), numpy.int32(2),
numpy.int32(4), numpy.int32(7)),
correctb3=(rand(7, 1), numpy.int32(7), numpy.int32(4)),
correctb4=(rand(7, 1), numpy.int32(2),
numpy.int32(7), numpy.int32(4)),
),
bad_runtime=dict(
bad_shape12=(rand(7), numpy.int32(7), numpy.int32(5)),
),
bad_build=dict(
vec=(rand(1), [numpy.int32(2)]),
        too_big32=(rand(6, 2, 4), numpy.int32(6), numpy.int32(2)),
        too_big32b=(rand(6, 2, 4), numpy.int32(6), numpy.int32(4)),
        too_big32c=(rand(6, 2, 4), numpy.int32(2), numpy.int32(4)),
        too_big32d=(rand(6, 2, 4), numpy.int32(2), numpy.int32(6)),
        too_big32e=(rand(6, 2, 4), numpy.int32(4), numpy.int32(6)),
        too_big32f=(rand(6, 2, 4), numpy.int32(4), numpy.int32(2)),
),
)
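# Note (added): the `expected` lambda above is numpy's equivalent of alloc:
# numpy.zeros(shp, dtype=x.dtype) + x broadcasts x up to the requested shape,
# which is exactly what alloc(x, *shp) produces.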
# Since not all inputs of Alloc are differentiable, we need different testers
s1, s2, s3 = randint_ranged(1, 13, (3,))
# alloc a scalar into a vector
Alloc01GradTester = makeBroadcastTester(
name='Alloc01GradTester',
#op = (lambda self, x: alloc(x, s1)),
op=(lambda x: alloc(x, s1)),
expected=(lambda x: numpy.zeros((s1,), dtype=x.dtype) + x),
grad=dict(
x1=(rand(),),
x2=(rand(),),
x3=(rand(),),
),
)
# alloc a vector into a tensor3
Alloc13GradTester = makeBroadcastTester(
name='Alloc13GradTester',
#op = (lambda self, x: alloc(x, s1, s2, s3)),
op=(lambda x: alloc(x, s1, s2, s3)),
expected=(lambda x: numpy.zeros((s1, s2, s3), dtype=x.dtype) + x),
grad=dict(
x1=(rand(s3),),
x2=(rand(s3),),
x3=(rand(s3),),
),
)
# unbroadcast a row to a matrix
Allocb1GradTester = makeBroadcastTester(
name='Allocb1GradTester',
op=lambda x: alloc(x, s1, s2),
expected=(lambda x: numpy.zeros((s1, s2), dtype=x.dtype) + x),
grad=dict(
x1=(rand(1, s2),),
x2=(rand(1, s2),),
x3=(rand(1, s2),),
),
)
# unbroadcast a row to a tensor3
Allocb2GradTester = makeBroadcastTester(
name='Allocb2GradTester',
op=lambda x: alloc(x, s1, s2, s3),
expected=(lambda x: numpy.zeros((s1, s2, s3), dtype=x.dtype) + x),
grad=dict(
x1=(rand(1, s3),),
x2=(rand(1, s3),),
x3=(rand(1, s3),),
),
)
# unbroadcast a col to a matrix
Allocb3GradTester = makeBroadcastTester(
name='Allocb3GradTester',
op=lambda x: alloc(x, s1, s2),
expected=(lambda x: numpy.zeros((s1, s2), dtype=x.dtype) + x),
grad=dict(
x1=(rand(s1, 1),),
x2=(rand(s1, 1),),
x3=(rand(s1, 1),),
),
)
# unbroadcast a col to a tensor3
Allocb4GradTester = makeBroadcastTester(
name='Allocb4GradTester',
op=lambda x: alloc(x, s1, s2, s3),
expected=(lambda x: numpy.zeros((s1, s2, s3), dtype=x.dtype) + x),
grad=dict(
x1=(rand(s2, 1),),
x2=(rand(s2, 1),),
x3=(rand(s2, 1),),
),
)
# Partial unbroadcast of a dimshuffled input
AllocDimshuffleGradTester = makeBroadcastTester(
    name='AllocDimshuffleGradTester',
op=lambda x: alloc(x.dimshuffle('x', 'x', 0), 1, s2, s3),
expected=(lambda x: numpy.zeros((1, s2, s3), dtype=x.dtype) + x),
grad=dict(
x1=(rand(s3),),
x2=(rand(s3),),
x3=(rand(s3),),
),
)
AllocDimshuffleGradTester2 = makeBroadcastTester(
    name='AllocDimshuffleGradTester2',
op=lambda x: alloc(x.dimshuffle('x', 0), 1, s2, s3),
expected=(lambda x: numpy.zeros((1, s2, s3), dtype=x.dtype) + x),
grad=dict(
x1=(rand(s3),),
x2=(rand(s3),),
x3=(rand(s3),),
),
)
class ApplyDefaultTestOp(theano.Op):
def __init__(self, id):
self.default_output = id
def make_node(self, x):
x = theano.tensor.as_tensor_variable(x)
return theano.Apply(self, [x], [x.type()])
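    # Note (added): `default_output` selects which output of the Apply node
    # `as_tensor_variable` should extract.  The tests below check that an
    # index of 0 works and that a negative, too-large, or list-valued index
    # raises AttributeError.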
class TestAsTensorVariable(unittest.TestCase):
"""
Unit test for ensuring that as_tensor_variable handles Apply objects
correctly and removes leading broadcastable dimensions when possible.
"""
def setUp(self):
self.x = tensor.scalar('x')
def test_one_output(self):
good_apply_var = ApplyDefaultTestOp(0).make_node(self.x)
x = as_tensor_variable(good_apply_var)
def test_below_zero_output(self):
bad_apply_var = ApplyDefaultTestOp(-1).make_node(self.x)
self.assertRaises(AttributeError, as_tensor_variable, bad_apply_var)
def test_above_output_len(self):
bad_apply_var = ApplyDefaultTestOp(2).make_node(self.x)
self.assertRaises(AttributeError, as_tensor_variable, bad_apply_var)
def test_list(self):
bad_apply_var = ApplyDefaultTestOp([0, 1]).make_node(self.x)
self.assertRaises(AttributeError, as_tensor_variable, bad_apply_var)
def test_strip_leading_broadcastable(self):
x = tensor.TensorType(config.floatX, (True, False))('x')
x = as_tensor_variable(x, ndim=1)
assert(x.ndim == 1)
x = tensor.matrix('x', dtype=config.floatX)
self.assertRaises(ValueError, as_tensor_variable, x, ndim=1)
class TestAlloc(unittest.TestCase):
dtype = config.floatX
mode = mode_opt
shared = staticmethod(theano.shared)
allocs = [tensor.Alloc()] * 3
def setUp(self):
self.rng = numpy.random.RandomState(seed=utt.fetch_seed())
def test_alloc_constant_folding(self):
test_params = numpy.asarray(self.rng.randn(50 * 60),
self.dtype)
some_vector = vector('some_vector', dtype=self.dtype)
some_matrix = some_vector.reshape((60, 50))
variables = self.shared(numpy.ones((50,), dtype=self.dtype))
idx = tensor.constant(numpy.arange(50))
for alloc, (subtensor, n_alloc) in zip(self.allocs, [
# IncSubtensor1
(some_matrix[:60], 2),
# AdvancedIncSubtensor1
(some_matrix[arange(60)], 2),
# AdvancedIncSubtensor
(some_matrix[idx, idx], 1)
]):
derp = sum(dot(subtensor, variables))
fobj = theano.function([some_vector], derp, mode=self.mode)
grad_derp = theano.grad(derp, some_vector)
fgrad = theano.function([some_vector], grad_derp,
mode=self.mode)
topo_obj = fobj.maker.fgraph.toposort()
            # <= is needed because the GPU currently doesn't implement
            # AdvancedIncSubtensor. Once it does, this can be
            # replaced with ==.
assert numpy.sum([isinstance(node.op, type(alloc))
for node in topo_obj]) <= 1
topo_grad = fgrad.maker.fgraph.toposort()
# print subtensor
# theano.printing.debugprint(fgrad)
assert numpy.sum([isinstance(node.op, type(alloc))
for node in topo_grad]) == n_alloc, (
alloc, subtensor, n_alloc, topo_grad)
fobj(test_params)
fgrad(test_params)
def test_alloc_output(self):
val = tensor.constant(self.rng.randn(1, 1), dtype=self.dtype)
for alloc in self.allocs:
# The output is the result of the alloc operation,
# we do not want it to be constant-folded
out = alloc(val, 50, 60)
f = theano.function([], out, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert numpy.sum([isinstance(node.op, type(alloc))
for node in topo]) == 1
assert not isinstance(topo[0].op, DeepCopyOp)
def test_ones(self):
for shp in [[], 1, [1], [1, 2], [1, 2, 3]]:
ones = theano.function([], [tensor.ones(shp)], mode=self.mode)
assert numpy.allclose(ones(), numpy.ones(shp))
# scalar doesn't have to be provided as input
x = scalar()
shp = []
ones_scalar = theano.function([], [tensor.ones(x.shape)],
mode=self.mode)
assert numpy.allclose(ones_scalar(), numpy.ones(shp))
for (typ, shp) in [(vector, [3]), (matrix, [3, 4])]:
x = typ()
ones_tensor = theano.function([x], [tensor.ones(x.shape)],
mode=self.mode)
inp = numpy.zeros(shp, dtype=config.floatX)
assert numpy.allclose(ones_tensor(inp),
numpy.ones(shp))
def test_zeros(self):
for shp in [[], 1, [1], [1, 2], [1, 2, 3]]:
zeros = theano.function([], [tensor.zeros(shp)],
mode=self.mode)
assert numpy.allclose(zeros(), numpy.zeros(shp))
# scalar doesn't have to be provided as input
x = scalar()
shp = []
zeros_scalar = theano.function([], [tensor.zeros(x.shape)],
mode=self.mode)
assert numpy.allclose(zeros_scalar(), numpy.zeros(shp))
for (typ, shp) in [(vector, [3]), (matrix, [3, 4])]:
x = typ()
zeros_tensor = theano.function([x], [tensor.zeros(x.shape)],
mode=self.mode)
inp = numpy.zeros(shp, dtype=config.floatX)
assert numpy.allclose(zeros_tensor(inp),
numpy.zeros(shp))
# This is slow for the ('int8', 3) version.
def test_eye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
        # Currently DebugMode does not support None as an input even though
        # it is allowed here.
if M is None and theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
M = N
N_symb = tensor.iscalar()
M_symb = tensor.iscalar()
k_symb = tensor.iscalar()
f = function([N_symb, M_symb, k_symb],
eye(N_symb, M_symb, k_symb, dtype=dtype))
result = f(N, M, k)
assert numpy.allclose(result, numpy.eye(N, M_, k, dtype=dtype))
assert result.dtype == numpy.dtype(dtype)
for dtype in ALL_DTYPES:
yield check, dtype, 3
# M != N, k = 0
yield check, dtype, 3, 5
yield check, dtype, 5, 3
# N == M, k != 0
yield check, dtype, 3, 3, 1
yield check, dtype, 3, 3, -1
# N < M, k != 0
yield check, dtype, 3, 5, 1
yield check, dtype, 3, 5, -1
# N > M, k != 0
yield check, dtype, 5, 3, 1
yield check, dtype, 5, 3, -1
class test_triangle(unittest.TestCase):
def test_tri(self):
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
            # Currently DebugMode does not support None as an input even
            # though it is allowed here.
if M is None and theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
M = N
N_symb = tensor.iscalar()
M_symb = tensor.iscalar()
k_symb = tensor.iscalar()
f = function([N_symb, M_symb, k_symb],
tri(N_symb, M_symb, k_symb, dtype=dtype))
result = f(N, M, k)
self.assertTrue(
numpy.allclose(result, numpy.tri(N, M_, k, dtype=dtype)))
self.assertTrue(result.dtype == numpy.dtype(dtype))
for dtype in ALL_DTYPES:
yield check, dtype, 3
# M != N, k = 0
yield check, dtype, 3, 5
yield check, dtype, 5, 3
# N == M, k != 0
yield check, dtype, 3, 3, 1
yield check, dtype, 3, 3, -1
# N < M, k != 0
yield check, dtype, 3, 5, 1
yield check, dtype, 3, 5, -1
# N > M, k != 0
yield check, dtype, 5, 3, 1
yield check, dtype, 5, 3, -1
def test_tril_triu(self):
def check_l(m, k=0):
m_symb = matrix(dtype=m.dtype)
k_symb = iscalar()
f = function([m_symb, k_symb], tril(m_symb, k_symb))
result = f(m, k)
self.assertTrue(numpy.allclose(result, numpy.tril(m, k)))
            self.assertTrue(result.dtype == m.dtype)
def check_u(m, k=0):
m_symb = matrix(dtype=m.dtype)
k_symb = iscalar()
f = function([m_symb, k_symb], triu(m_symb, k_symb))
result = f(m, k)
self.assertTrue(numpy.allclose(result, numpy.triu(m, k)))
            self.assertTrue(result.dtype == m.dtype)
for dtype in ALL_DTYPES:
m = rand_of_dtype((10, 10), dtype)
yield check_l, m, 0
yield check_l, m, 1
yield check_l, m, -1
yield check_u, m, 0
yield check_u, m, 1
yield check_u, m, -1
m = rand_of_dtype((10, 5), dtype)
yield check_l, m, 0
yield check_l, m, 1
yield check_l, m, -1
yield check_u, m, 0
yield check_u, m, 1
yield check_u, m, -1
class test_nonzero(unittest.TestCase):
def test_nonzero(self):
def check(m):
m_symb = theano.tensor.tensor(dtype=m.dtype,
broadcastable=(False,) * m.ndim)
f_tuple = function([m_symb], nonzero(m_symb, return_matrix=False))
f_matrix = function([m_symb], nonzero(m_symb, return_matrix=True))
self.assertTrue(numpy.allclose(f_matrix(m),
numpy.vstack(numpy.nonzero(m))))
for i, j in zip(f_tuple(m), numpy.nonzero(m)):
self.assertTrue(numpy.allclose(i, j))
rand0d = numpy.array(rand())
self.assertRaises(ValueError, check, rand0d)
rand1d = rand(8)
rand1d[:4] = 0
check(rand1d)
rand2d = rand(8, 9)
rand2d[:4] = 0
check(rand2d)
rand3d = rand(8, 9, 10)
rand3d[:4] = 0
check(rand3d)
rand4d = rand(8, 9, 10, 11)
rand4d[:4] = 0
check(rand4d)
def test_flatnonzero(self):
def check(m):
m_symb = theano.tensor.tensor(dtype=m.dtype,
broadcastable=(False,) * m.ndim)
f = function([m_symb], flatnonzero(m_symb))
result = f(m)
assert numpy.allclose(result, numpy.flatnonzero(m))
rand0d = numpy.array(rand())
self.assertRaises(ValueError, check, rand0d)
rand1d = rand(8)
rand1d[:4] = 0
check(rand1d)
rand2d = rand(8, 9)
rand2d[:4] = 0
check(rand2d)
rand3d = rand(8, 9, 10)
rand3d[:4] = 0
check(rand3d)
rand4d = rand(8, 9, 10, 11)
rand4d[:4] = 0
check(rand4d)
def test_nonzero_values(self):
def check(m):
m_symb = theano.tensor.tensor(dtype=m.dtype,
broadcastable=(False,) * m.ndim)
f = function([m_symb], nonzero_values(m_symb))
result = f(m)
assert numpy.allclose(result, m[numpy.nonzero(m)])
rand0d = rand()
self.assertRaises(ValueError, check, rand0d)
rand1d = rand(8)
rand1d[:4] = 0
check(rand1d)
rand2d = rand(8, 9)
rand2d[:4] = 0
check(rand2d)
rand3d = rand(8, 9, 10)
rand3d[:4] = 0
check(rand3d)
rand4d = rand(8, 9, 10, 11)
rand4d[:4] = 0
check(rand4d)
def test_identity():
def check(dtype):
obj = rand_of_dtype((2,), dtype)
sym = tensor.vector(dtype=dtype)
f = function([sym], tensor_copy(sym))
assert numpy.all(obj == f(obj))
assert obj.dtype == f(obj).dtype
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
if theano.config.mode != 'FAST_COMPILE':
assert isinstance(topo[0].op, DeepCopyOp)
for dtype in ALL_DTYPES:
yield check, dtype
class CastTester(unittest.TestCase):
def test_good_between_real_types(self):
good = itertools.chain(
multi_dtype_cast_checks((2,), dtypes=REAL_DTYPES),
# Casts from foo to foo
[('%s_%s' % (rand_of_dtype((2,), dtype), dtype),
(rand_of_dtype((2,), dtype), dtype))
for dtype in ALL_DTYPES])
for testname, (obj, dtype) in good:
inp = tensor.vector(dtype=obj.dtype)
out = tensor.cast(inp, dtype=dtype)
f = function([inp], out)
assert f(obj).dtype == numpy.dtype(dtype)
# Test astype too
out2 = inp.astype(dtype=dtype)
assert out2.type == out.type
def test_cast_from_real_to_complex(self):
for real_dtype in REAL_DTYPES:
for complex_dtype in COMPLEX_DTYPES:
inp = tensor.vector(dtype=real_dtype)
out = tensor.cast(inp, dtype=complex_dtype)
f = function([inp], out)
obj = rand_of_dtype((2, ), real_dtype)
assert f(obj).dtype == numpy.dtype(complex_dtype)
    def test_cast_from_complex_to_real_raises_error(self):
        for real_dtype in REAL_DTYPES:
            for complex_dtype in COMPLEX_DTYPES:
                inp = tensor.vector(dtype=complex_dtype)
                self.assertRaises(TypeError, tensor.cast,
                                  inp, dtype=real_dtype)
ClipTester = makeTester(name='ClipTester',
op=clip,
expected=lambda x, y, z: numpy.clip(x, y, z),
good=dict(correct1=((5 * rand(5, 5)).astype('float32'),
numpy.array(-1, dtype='float32'),
numpy.array(1, dtype='float32')),
correct2=((5 * rand(5, 5)).astype('float64'),
numpy.array(-1, dtype='float64'),
numpy.array(1, dtype='float64')),
correct3=(randint(5, 5).astype('int8'),
numpy.array(-1, dtype='int8'),
numpy.array(1, dtype='int8')),
correct4=(randint(5, 5).astype('int16'),
numpy.array(-1, dtype='int16'),
numpy.array(1, dtype='int16')),
correct5=(randint(5, 5).astype('int32'),
numpy.array(-1, dtype='int32'),
numpy.array(1, dtype='int32')),
correct6=(randint(5, 5).astype('int64'),
numpy.array(-1, dtype='int64'),
numpy.array(1, dtype='int64')),
                                  # min > max: ill-defined behaviour, but
                                  # it should match NumPy's
correct7=((5 * rand(5, 5)).astype('float64'),
numpy.array(1, dtype='float64'),
numpy.array(-1, dtype='float64')))
)
# I can't think of any way to make this fail at runtime
class T_Clip(unittest.TestCase):
def test_complex_value(self):
for dtype in ['complex64', 'complex128']:
a = tensor.vector(dtype=dtype)
b = tensor.scalar()
c = tensor.scalar()
self.assertRaises(TypeError, clip, a, b, c)
def test_clip_repeat_grad(self):
        # This is testing for issue gh-633
x, y = tensor.vectors('xy')
a = clip(x, y, x)
g = theano.gradient.grad(a.sum(), x)
fn = theano.function([x, y], [g])
# Test the other way around as well
a2 = clip(x, x, y)
g2 = theano.gradient.grad(a2.sum(), x)
fn2 = theano.function([x, y], [g2])
# Test for the equal case too
a3 = theano.tensor.clip(x, x, x)
g3 = theano.gradient.grad(a3.sum(), x)
fn3 = theano.function([x], [g3])
rng = numpy.random.RandomState(utt.fetch_seed())
nvals = 50
xval = rng.rand(nvals).astype(config.floatX)
# To ensure that the min < x
yval_mn = rng.rand(nvals).astype(config.floatX) - 1.0
# To ensure that the max > x
yval_mx = rng.rand(nvals).astype(config.floatX) + 1.0
aval, = fn(xval, yval_mn)
aval2, = fn2(xval, yval_mx)
aval3, = fn3(xval)
self.assertTrue(numpy.all(aval == 1.))
self.assertTrue(numpy.all(aval2 == 1.))
self.assertTrue(numpy.all(aval3 == 1.))
def test_clip_repeat_verify_grad(self):
# Additional tests for issue gh-633
utt.verify_grad(
op=lambda x: clip(x, 0, x),
pt=[rand_nonzero((3, 7))])
utt.verify_grad(
op=lambda x: clip(x, x, 0),
pt=[rand_nonzero((3, 7))])
utt.verify_grad(
op=lambda x: clip(0, x, x),
pt=[rand_nonzero((3, 7))])
utt.verify_grad(
op=lambda x: clip(x, x, x),
pt=[rand_nonzero((3, 7))])
# TODO: consider moving this function / functionality to gradient.py
# rationale: it's tricky, and needed every time you want to verify
# a gradient numerically
# useful mostly for unit tests
def _approx_eq(a, b, eps=1.0e-4):
a = numpy.asarray(a)
b = numpy.asarray(b)
if a.shape != b.shape:
if _approx_eq.debug:
print(a.shape, b.shape)
return False
abs_rel_err = numeric_grad.abs_rel_err(a, b)
    # numpy.max doesn't like empty ndarrays.
if a.size == b.size == 0:
return True
if numpy.max(abs_rel_err) >= eps:
if _approx_eq.debug:
print(a, b)
return False
return True
_approx_eq.debug = 0
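# Illustrative usage of _approx_eq (added comment, not executed here):
#   _approx_eq(numpy.ones(3), numpy.ones(3) + 1e-6)   # True: rel. err < eps
#   _approx_eq(numpy.ones(3), numpy.ones(3) + 1e-2)   # False: rel. err >= eps
#   _approx_eq(numpy.ones((2, 3)), numpy.ones(3))     # False: shape mismatch
# Set _approx_eq.debug = 1 to print the offending values when a check fails.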
def test_batched_dot():
first = theano.tensor.tensor3("first")
second = theano.tensor.tensor3("second")
output = theano.tensor.basic.batched_dot(first, second)
first_val = numpy.random.rand(10, 10, 20).astype(config.floatX)
second_val = numpy.random.rand(10, 20, 5).astype(config.floatX)
result_fn = theano.function([first, second], output)
result = result_fn(first_val, second_val)
assert result.shape[0] == first_val.shape[0]
assert result.shape[1] == first_val.shape[1]
assert result.shape[2] == second_val.shape[2]
first_mat = theano.tensor.dmatrix("first")
second_mat = theano.tensor.dmatrix("second")
output = theano.tensor.basic.batched_dot(first_mat, second_mat)
first_mat_val = numpy.random.rand(10, 10).astype(config.floatX)
second_mat_val = numpy.random.rand(10, 10).astype(config.floatX)
result_fn = theano.function([first_mat, second_mat], output)
result = result_fn(first_mat_val, second_mat_val)
assert result.shape[0] == first_mat_val.shape[0]
def test_batched_tensordot():
first = theano.tensor.tensor4("first")
second = theano.tensor.tensor4("second")
axes = [[1, 2], [3, 1]]
output = theano.tensor.basic.batched_tensordot(first, second, axes)
first_val = numpy.random.rand(8, 10, 20, 3).astype(config.floatX)
second_val = numpy.random.rand(8, 20, 5, 10).astype(config.floatX)
result_fn = theano.function([first, second], output)
result = result_fn(first_val, second_val)
assert result.shape[0] == first_val.shape[0]
assert result.shape[1] == first_val.shape[3]
assert result.shape[2] == second_val.shape[2]
first_mat = theano.tensor.dmatrix("first")
second_mat = theano.tensor.dmatrix("second")
axes = 1
output = theano.tensor.basic.batched_tensordot(first_mat, second_mat, axes)
first_mat_val = numpy.random.rand(10, 4).astype(config.floatX)
second_mat_val = numpy.random.rand(10, 4).astype(config.floatX)
result_fn = theano.function([first_mat, second_mat], output)
result = result_fn(first_mat_val, second_mat_val)
assert result.shape[0] == first_mat_val.shape[0]
assert len(result.shape) == 1
def test_tensor_values_eq_approx():
    # test that inf, -inf and nan are equal to themselves
a = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
assert TensorType.values_eq_approx(a, a)
    # test that inf and -inf are not considered equal to each other
b = numpy.asarray([numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
assert not TensorType.values_eq_approx(a, b)
b = numpy.asarray([-numpy.inf, -1, 0, 1, -numpy.inf, numpy.nan])
assert not TensorType.values_eq_approx(a, b)
# test allow_remove_inf
b = numpy.asarray([numpy.inf, -1, 0, 1, 5, numpy.nan])
assert TensorType.values_eq_approx(a, b, allow_remove_inf=True)
b = numpy.asarray([numpy.inf, -1, 0, 1, 5, 6])
assert not TensorType.values_eq_approx(a, b, allow_remove_inf=True)
# test allow_remove_nan
b = numpy.asarray([numpy.inf, -1, 0, 1, 5, numpy.nan])
assert not TensorType.values_eq_approx(a, b, allow_remove_nan=False)
b = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, 6])
assert not TensorType.values_eq_approx(a, b, allow_remove_nan=False)
def test_nan_inf_constant_signature():
# Test that the signature of a constant tensor containing NaN and Inf
# values is correct.
test_constants = [
[numpy.nan, numpy.inf, 0, 1],
[numpy.nan, numpy.inf, -numpy.inf, 1],
[0, numpy.inf, -numpy.inf, 1],
[0, 3, -numpy.inf, 1],
[0, 3, numpy.inf, 1],
[numpy.nan, 3, 4, 1],
[0, 3, 4, 1],
numpy.nan,
numpy.inf,
-numpy.inf,
0,
1,
]
n = len(test_constants)
    # We verify that the signatures of two entries i, j in the list above
    # are equal if and only if i == j.
for i in xrange(n):
for j in xrange(n):
x = constant(test_constants[i])
y = constant(test_constants[j])
assert (x.signature() == y.signature()) == (i == j)
    # Also test that nan != 0 and nan != nan.
x = tensor.scalar()
mode = get_default_mode()
if isinstance(mode, theano.compile.debugmode.DebugMode):
# Disable the check preventing usage of NaN / Inf values.
# We first do a copy of the mode to avoid side effects on other tests.
mode = copy(mode)
mode.check_isfinite = False
f = theano.function([x], eq(x, numpy.nan), mode=mode)
assert f(0) == 0
assert f(numpy.nan) == 0
class T_Shape(unittest.TestCase):
def test_basic0(self):
s = shape(numpy.ones((5, 3)))
self.assertTrue((eval_outputs([s]) == [5, 3]).all())
def test_basic1(self):
s = shape(numpy.ones((2)))
self.assertTrue((eval_outputs([s]) == [2]).all())
def test_basic2(self):
s = shape(numpy.ones((5, 3, 10)))
self.assertTrue((eval_outputs([s]) == [5, 3, 10]).all())
class T_max_and_argmax(unittest.TestCase):
def setUp(self):
utt.seed_rng()
MaxAndArgmax.debug = 0
def test0(self):
n = as_tensor_variable(5.0)
v, i = eval_outputs(max_and_argmax(n))
self.assertTrue(v == 5.0)
self.assertTrue(i == 0)
assert i.dtype == 'int64'
v = eval_outputs(max_and_argmax(n)[0].shape)
assert len(v) == 0
v = eval_outputs(max_and_argmax(n)[1].shape)
assert len(v) == 0
def test1(self):
n = as_tensor_variable([1, 2, 3, 2, -6])
v, i = eval_outputs(max_and_argmax(n))
self.assertTrue(v == 3)
self.assertTrue(i == 2)
assert i.dtype == 'int64'
v = eval_outputs(max_and_argmax(n)[0].shape)
assert len(v) == 0
def test2(self):
data = rand(2, 3)
n = as_tensor_variable(data)
for (axis, np_axis) in [(-1, -1), (0, 0), (1, 1), (None, None),
([0, 1], None), ([1, 0], None),
(NoneConst.clone(), None),
(constant(0), 0)]:
v, i = eval_outputs(max_and_argmax(n, axis))
assert i.dtype == 'int64'
self.assertTrue(numpy.all(v == numpy.max(data, np_axis)))
self.assertTrue(numpy.all(i == numpy.argmax(data, np_axis)))
v_shape = eval_outputs(max_and_argmax(n, axis)[0].shape)
assert tuple(v_shape) == numpy.max(data, np_axis).shape
def test2_invalid(self):
n = as_tensor_variable(rand(2, 3))
# Silence expected error messages
_logger = logging.getLogger('theano.gof.opt')
oldlevel = _logger.level
_logger.setLevel(logging.CRITICAL)
try:
try:
eval_outputs(max_and_argmax(n, 3))
assert False
except ValueError as e:
pass
finally:
_logger.setLevel(oldlevel)
def test2_invalid_neg(self):
n = as_tensor_variable(rand(2, 3))
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
try:
eval_outputs(max_and_argmax(n, -3))
assert False
except ValueError as e:
pass
finally:
sys.stderr = old_stderr
def test2_valid_neg(self):
n = as_tensor_variable(rand(2, 3))
v, i = eval_outputs(max_and_argmax(n, -1))
assert i.dtype == 'int64'
self.assertTrue(v.shape == (2,))
self.assertTrue(i.shape == (2,))
self.assertTrue(numpy.all(v == numpy.max(n.value, -1)))
self.assertTrue(numpy.all(i == numpy.argmax(n.value, -1)))
v, i = eval_outputs(max_and_argmax(n, -2))
assert i.dtype == 'int64'
self.assertTrue(v.shape == (3,))
self.assertTrue(i.shape == (3,))
self.assertTrue(numpy.all(v == numpy.max(n.value, -2)))
self.assertTrue(numpy.all(i == numpy.argmax(n.value, -2)))
v = eval_outputs(max_and_argmax(n, -1)[0].shape)
assert v == (2)
v = eval_outputs(max_and_argmax(n, -2)[0].shape)
assert v == (3)
def test3(self):
data = rand(2, 3, 4)
n = as_tensor_variable(data)
for (axis, np_axis) in [(-1, -1), (0, 0), (1, 1), (None, None),
([0, 1, 2], None), ([1, 2, 0], None)]:
v, i = eval_outputs(max_and_argmax(n, axis))
assert i.dtype == 'int64'
self.assertTrue(numpy.all(v == numpy.max(data, np_axis)))
self.assertTrue(numpy.all(i == numpy.argmax(data, np_axis)))
v = eval_outputs(max_and_argmax(n, axis)[0].shape)
assert tuple(v) == numpy.max(data, np_axis).shape
def test_arg_grad(self):
"""
The test checks that the gradient of argmax(x).sum() is 0
"""
x = matrix()
cost = argmax(x, axis=0).sum()
gx = grad(cost, x)
val = tensor.get_scalar_constant_value(gx)
assert val == 0.0
def test_grad(self):
data = rand(2, 3)
n = as_tensor_variable(data)
def safe_verify_grad(func, data):
"""
Wrapper around 'verify_grad' that picks a proper value for epsilon.
This is needed because 'verify_grad' may fail when its epsilon is
            too large, due to the fact that argmax is not continuous.
We make sure epsilon is less than the minimum absolute value found
in the matrix of pairwise differences between all elements in the
data. This way, the argmax will not change when adding epsilon.
"""
# 'data' is a one-element list.
data_tensor, = data
# Flatten it into a 1D vector.
data_vector = data_tensor.flatten()
# Compute pairwise absolute differences.
diff = numpy.abs(data_vector.reshape((-1, 1)) - data_vector)
# Alter the diagonal to avoid a zero minimum.
for i in xrange(len(diff)):
diff[i, i] = 1
# Find an appropriate epsilon.
eps = builtin_min(numeric_grad.type_eps[config.floatX],
diff.min() / 2)
# Run gradient verification.
utt.verify_grad(func, data, eps=eps)
def check_grad_max(data, max_grad_data, axis=None):
"""
            Check that the gradient of max is a one-hot mask at the argmax.
            verify_grad only checks numerical consistency, not this structure.
"""
# This works only for axis in [0, None].
assert axis in [0, None]
z = numpy.zeros_like(data)
z = z.flatten()
argmax = numpy.argmax(data, axis=axis)
if argmax.ndim == 0:
z[argmax] += 1
else:
for id, v in enumerate(argmax):
z[v * numpy.prod(data.shape[data.ndim - 1:axis:-1]) +
id] += 1
z = z.reshape(data.shape)
assert numpy.all(max_grad_data == z)
for axis in (-1, 0, 1, None):
for j in xrange(2):
safe_verify_grad(lambda v: max_and_argmax(v, axis=axis)[j],
[data])
if axis != 1:
safe_verify_grad(lambda v: max_and_argmax(v.flatten(),
axis=axis)[j],
[data])
if axis in (0, None):
check_grad_max(data, eval_outputs(grad(
max_and_argmax(n, axis=axis)[0].sum(), n)), axis=axis)
check_grad_max(data, eval_outputs(grad(
max_and_argmax(n.flatten())[0], n)))
# Test 3d inner dimensions
data = rand(3, 4, 5)
for i in [0, 1, 2]:
safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[0], [data])
safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[1], [data])
# Test 4d inner dimensions
data = rand(2, 3, 4, 5)
for i in [0, 1, 2, 3]:
safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[0], [data])
safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[1], [data])
# Test grad with multiple axes
for i in [[0, 1], [0, 0]]:
safe_verify_grad(lambda v: max_and_argmax(v, axis=i)[0], [data])
safe_verify_grad(lambda v: max_and_argmax(v, axis=i)[1], [data])
def test_preserve_broadcastable(self):
"""
Ensure the original broadcastable flags are preserved by Max/Argmax.
"""
x = tensor.matrix().dimshuffle('x', 0, 'x', 1, 'x')
y = x.max(axis=1)
assert y.type.broadcastable == (True, True, False, True)
def test_multiple_axes(self):
data = numpy.arange(24).reshape(3, 2, 4)
x = as_tensor_variable(data)
v, i = eval_outputs(max_and_argmax(x, [1, -1]))
assert numpy.all(v == numpy.array([7, 15, 23]))
assert numpy.all(i == numpy.array([7, 7, 7]))
v = eval_outputs(max_and_argmax(x, [1, -1])[0].shape)
assert tuple(v) == numpy.max(data, (1, -1)).shape
def test_zero_shape(self):
x = tensor.matrix()
m, i = max_and_argmax(x, axis=1)
f = theano.function([x], [m, i])
xv = numpy.zeros((0, 4), dtype=floatX)
mv, iv = f(xv)
assert mv.shape == (0,)
assert iv.shape == (0,)
class T_argmin_argmax(unittest.TestCase):
def setUp(self):
utt.seed_rng()
MaxAndArgmax.debug = 0
def test_scalar(self):
for fct in [argmin, argmax]:
n = as_tensor_variable(5.0)
i = eval_outputs(fct(n))
self.assertTrue(i == 0)
v = eval_outputs(fct(n).shape)
assert len(v) == 0
def test_list(self):
n = as_tensor_variable([1, 2, 3, 2, -6])
i = eval_outputs(argmin(n))
self.assertTrue(i == 4)
v = eval_outputs(argmin(n).shape)
assert len(v) == 0
n = as_tensor_variable([1, 2, 3, 2, -6])
i = eval_outputs(argmax(n))
self.assertTrue(i == 2)
v = eval_outputs(argmax(n).shape)
assert len(v) == 0
def test2(self):
data = rand(2, 3)
n = as_tensor_variable(data)
for fct, nfct in [(argmax, numpy.argmax), (argmin, numpy.argmin)]:
for (axis, np_axis) in [(-1, -1), (0, 0), (1, 1), (None, None),
([0, 1], None), ([1, 0], None)]:
v = eval_outputs(fct(n, axis))
self.assertTrue(numpy.all(v == nfct(data, np_axis)))
v_shape = eval_outputs(fct(n, axis).shape)
assert tuple(v_shape) == nfct(data, np_axis).shape
def test2_invalid(self):
for fct, nfct in [(argmax, numpy.argmax), (argmin, numpy.argmin)]:
n = as_tensor_variable(rand(2, 3))
# Silence expected error messages
_logger = logging.getLogger('theano.gof.opt')
oldlevel = _logger.level
_logger.setLevel(logging.CRITICAL)
try:
try:
eval_outputs(fct(n, 3))
assert False
except ValueError as e:
pass
finally:
_logger.setLevel(oldlevel)
def test2_invalid_neg(self):
for fct, nfct in [(argmax, numpy.argmax), (argmin, numpy.argmin)]:
n = as_tensor_variable(rand(2, 3))
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
try:
eval_outputs(fct(n, -3))
assert False
except ValueError as e:
pass
finally:
sys.stderr = old_stderr
def test2_valid_neg(self):
for fct, nfct in [(argmax, numpy.argmax), (argmin, numpy.argmin)]:
n = as_tensor_variable(rand(2, 3))
i = eval_outputs(fct(n, -1))
self.assertTrue(i.shape == (2,))
self.assertTrue(numpy.all(i == nfct(n.value, -1)))
i = eval_outputs(fct(n, -2))
self.assertTrue(i.shape == (3,))
self.assertTrue(numpy.all(i == nfct(n.value, -2)))
v = eval_outputs(fct(n, -1).shape)
assert v == (2)
v = eval_outputs(fct(n, -2).shape)
assert v == (3)
def test3(self):
data = rand(2, 3, 4)
n = as_tensor_variable(data)
for fct, nfct in [(argmax, numpy.argmax), (argmin, numpy.argmin)]:
for (axis, np_axis) in [(-1, -1), (0, 0), (1, 1), (2, 2),
(None, None), ([0, 1, 2], None),
([1, 0, 2], None)]:
v = eval_outputs(fct(n, axis))
self.assertTrue(numpy.all(v == nfct(data, np_axis)))
v_shape = eval_outputs(fct(n, axis).shape)
assert tuple(v_shape) == nfct(data, np_axis).shape
def test_grad_argmin(self):
data = rand(2, 3)
n = as_tensor_variable(data)
n.name = 'n'
# test grad of argmin
utt.verify_grad(lambda v: argmin(v, axis=-1), [data])
utt.verify_grad(lambda v: argmin(v, axis=[0]), [data])
utt.verify_grad(lambda v: argmin(v, axis=[1]), [data])
utt.verify_grad(lambda v: argmin(v.flatten()), [data])
try:
cost = argmin(n, axis=-1)
cost.name = None
g = grad(cost, n)
raise Exception('Expected an error')
except TypeError:
pass
def test_grad_argmax(self):
data = rand(2, 3)
n = as_tensor_variable(data)
# test grad of argmax
utt.verify_grad(lambda v: argmax(v, axis=-1), [data])
utt.verify_grad(lambda v: argmax(v, axis=[0]), [data])
utt.verify_grad(lambda v: argmax(v, axis=[1]), [data])
utt.verify_grad(lambda v: argmax(v.flatten()), [data])
try:
grad(argmax(n, axis=-1), n)
raise Exception('Expected an error')
except TypeError:
pass
class T_min_max(unittest.TestCase):
def setUp(self):
utt.seed_rng()
MaxAndArgmax.debug = 0
def test_scalar(self):
for fct in [max, min]:
n = as_tensor_variable(5.0)
v = eval_outputs(fct(n))
self.assertTrue(v == 5.0)
v = eval_outputs(fct(n).shape)
assert len(v) == 0
def test_list(self):
for fct, nfct in [(max, numpy.max), (min, numpy.min)]:
n = as_tensor_variable([1, 2, 3, 2, -6])
v = eval_outputs([fct(n)])
self.assertTrue(v == nfct(n.value))
v = eval_outputs(fct(n).shape)
assert len(v) == 0
def test2(self):
data = rand(2, 3)
n = as_tensor_variable(data)
for fct, nfct in [(max, numpy.max), (min, numpy.min)]:
for (axis, np_axis) in [(-1, -1), (0, 0), (1, 1), (None, None),
([0, 1], None), ([1, 0], None)]:
v = eval_outputs(fct(n, axis))
self.assertTrue(numpy.all(v == nfct(data, np_axis)))
v_shape = eval_outputs(fct(n, axis).shape)
assert tuple(v_shape) == nfct(data, np_axis).shape
def test2_invalid(self):
for fct in [max, min]:
n = as_tensor_variable(rand(2, 3))
# Silence expected error messages
_logger = logging.getLogger('theano.gof.opt')
oldlevel = _logger.level
_logger.setLevel(logging.CRITICAL)
try:
try:
eval_outputs(fct(n, 3))
assert False
except ValueError as e:
pass
finally:
_logger.setLevel(oldlevel)
def test2_invalid_neg(self):
for fct in [max, min]:
n = as_tensor_variable(rand(2, 3))
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
try:
eval_outputs(fct(n, -3))
assert False
except ValueError as e:
pass
finally:
sys.stderr = old_stderr
def test2_valid_neg(self):
for fct, nfct in [(max, numpy.max), (min, numpy.min)]:
n = as_tensor_variable(rand(2, 3))
v = eval_outputs(fct(n, -1))
self.assertTrue(v.shape == (2,))
self.assertTrue(numpy.all(v == nfct(n.value, -1)))
v = eval_outputs(fct(n, -2))
self.assertTrue(v.shape == (3,))
self.assertTrue(numpy.all(v == nfct(n.value, -2)))
v = eval_outputs(fct(n, -1).shape)
assert v == (2)
v = eval_outputs(fct(n, -2).shape)
assert v == (3)
def test3(self):
# Test with 1 axis or all axis out of 3 dims
data = rand(2, 3, 4)
n = as_tensor_variable(data)
for fct, nfct in [(max, numpy.max), (min, numpy.min)]:
for (axis, np_axis) in [(-1, -1), (0, 0), (1, 1), (2, 2),
(None, None), ([0, 1, 2], None),
([1, 0, 2], None)]:
v = eval_outputs(fct(n, axis))
self.assertTrue(numpy.all(v == nfct(data, np_axis)))
v_shape = eval_outputs(fct(n, axis).shape)
assert tuple(v_shape) == nfct(data, np_axis).shape
def test3b(self):
# Test with 2 axis out of 3 dims
data = rand(2, 3, 4)
n = as_tensor_variable(data)
for fct, nfct in [(max, numpy.max), (min, numpy.min)]:
for axis in [[0, 1], [1, 2], [0, 2]]:
v = eval_outputs(fct(n, axis))
np_v = nfct(nfct(data, axis[1]), axis[0])
self.assertTrue(numpy.all(v == np_v))
v_shape = eval_outputs(fct(n, axis).shape)
assert tuple(v_shape) == np_v.shape
def test_grad_max(self):
data = rand(2, 3)
n = as_tensor_variable(data)
def check_grad_max(data, max_grad_data, axis=None):
            # This works only for axis in [0, None]
assert axis in [0, None]
z = numpy.zeros_like(data)
z = z.flatten()
argmax = numpy.argmax(data, axis=axis)
if argmax.ndim == 0:
z[numpy.argmax(data, axis=axis)] += 1
else:
for id, v in enumerate(argmax):
z[v * numpy.prod(data.shape[data.ndim - 1:axis:-1])
+ id] += 1
z = z.reshape(data.shape)
assert numpy.all(max_grad_data == z)
# test grad of max
# axis is the last one
utt.verify_grad(lambda v: max(v, axis=-1), [data])
utt.verify_grad(lambda v: max(v, axis=[0]), [data])
check_grad_max(data, eval_outputs(grad(max(n, axis=0).sum(), n)),
axis=0)
utt.verify_grad(lambda v: max(v, axis=[1]), [data])
# check_grad_max(data,eval_outputs(grad(max(n,axis=1),n)),axis=1)
utt.verify_grad(lambda v: max(v.flatten()), [data])
check_grad_max(data, eval_outputs(grad(max(n.flatten()), n)))
def test_grad_min(self):
data = rand(2, 3)
n = as_tensor_variable(data)
def check_grad_min(data, min_grad_data, axis=None):
            # This works only for axis in [0, None]
assert axis in [0, None]
z = numpy.zeros_like(data)
z = z.flatten()
argmin = numpy.argmin(data, axis=axis)
if argmin.ndim == 0:
z[numpy.argmin(data, axis=axis)] += 1
else:
for id, v in enumerate(argmin):
z[v * numpy.prod(data.shape[data.ndim - 1:axis:-1])
+ id] += 1
z = z.reshape(data.shape)
assert numpy.all(min_grad_data == z)
# test grad of min
# axis is the last one
utt.verify_grad(lambda v: min(v, axis=-1), [data])
utt.verify_grad(lambda v: min(v, axis=[0]), [data])
check_grad_min(data, eval_outputs(grad(min(n, axis=0).sum(), n)),
axis=0)
utt.verify_grad(lambda v: min(v, axis=[1]), [data])
# check_grad_min(data,eval_outputs(grad(min(n,axis=1),n)),axis=1)
utt.verify_grad(lambda v: min(v.flatten()), [data])
check_grad_min(data, eval_outputs(grad(min(n.flatten()), n)))
def _grad_list(self):
"""
        Test the gradient when we have multiple axes at the same time.
        This is not implemented, so the test is disabled. See ticket:
http://www.assembla.com/spaces/theano/tickets/511
"""
data = rand(2, 3)
n = as_tensor_variable(data)
for fct in [max_and_argmax, max, min]:
utt.verify_grad(lambda v: fct(v, axis=[0, 1]), [data])
# check_grad_max(data, eval_outputs(grad(max_and_argmax(n,
# axis=1)[0], n)),axis=1)
def test_basic_allclose():
# This was raised by a user in https://github.com/Theano/Theano/issues/2975
assert tensor.basic._allclose(-0.311023883434, -0.311022856884)
class T_outer(unittest.TestCase):
def test_outer(self):
for m in range(4):
for n in range(4):
x = tensor.tensor(dtype='floatX', broadcastable=(False,) * m)
y = tensor.tensor(dtype='floatX', broadcastable=(False,) * n)
s1 = numpy.random.randint(1, 10, m)
s2 = numpy.random.randint(1, 10, n)
v1 = numpy.asarray(numpy.random.rand(*s1)).astype(floatX)
v2 = numpy.asarray(numpy.random.rand(*s2)).astype(floatX)
o = tensor.outer(x, y).eval({x: v1, y: v2})
assert_allclose(o, numpy.outer(v1, v2))
def test_grad(self):
"""
        Test the gradient of the outer graph
        with broadcastable dimensions, just in case.
"""
for shp0, shp1 in [((1,), (2,)),
((3,), (1,)),
((1,), (1,)),
((3,), (2,)),
((3, 2), (1, 1)),
((3, 2), (1, 4)),
((3, 2), (4, 1)),
((3, 2), (4, 5)),
((1, 2), (4, 5)),
((3, 1), (4, 5)),
((1, 1), (4, 5)),
((1, 1), (1, 1)),
]:
data0 = numpy.random.rand(*shp0).astype(floatX)
data1 = numpy.random.rand(*shp1).astype(floatX)
utt.verify_grad(tensor.outer, [data0, data1])
class T_GetVectorLength(unittest.TestCase):
def test_get_vector_length(self):
x = theano.shared(numpy.zeros((2, 3, 4, 5)))
assert len(list(x.shape)) == 4
assert len(list(x.shape[2:4])) == 2
assert len(list(x.shape[2:])) == 2
assert len(list(x.shape[1:4])) == 3
assert len(list(x.shape[2:2])) == 0
assert len(list(x.shape[1:5])) == 3
assert len(list(x.shape[1:10])) == 3
# Test step
assert len(list(x.shape[1:10:2])) == 2
# Test neg start
assert len(list(x.shape[-1:4])) == 1
assert len(list(x.shape[-6:4])) == 4
# test neg stop
assert len(list(x.shape[1:-2])) == 1
assert len(list(x.shape[1:-1])) == 2
class T_Join_and_Split(unittest.TestCase):
"""
Split is tested by each verify_grad method.
"""
def setUp(self):
Join.debug = False
utt.seed_rng()
self.mode = theano.compile.get_default_mode().excluding(
'constant_folding'
)
self.join_op = Join()
self.split_op_class = Split
self.make_vector_op = opt.MakeVector()
self.floatX = config.floatX
self.hide_error = theano.config.mode not in ['DebugMode',
'DEBUG_MODE',
'FAST_COMPILE']
self.shared = shared
def eval_outputs_and_check_join(self, outputs):
f = theano.function([], outputs, self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
variables = f()
if isinstance(variables, (tuple, list)) and len(variables) == 1:
return variables[0]
return variables
def eval_outputs_and_check_vector(self, outputs,
make_vector_op=None):
if make_vector_op is None:
make_vector_op = self.make_vector_op
f = theano.function([], outputs, self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(make_vector_op))]
variables = f()
if isinstance(variables, (tuple, list)) and len(variables) == 1:
return variables[0]
return variables
def test_join_scalar(self):
a = as_tensor_variable(1)
b = as_tensor_variable(2)
try:
s = join(0, a, b)
except TypeError:
return
self.fail()
def test_stack_mixed_type_constants(self):
# tested only on cpu as gpu support only float32
a = as_tensor_variable(1)
b = as_tensor_variable(2.0)
c = tensor._shared(numpy.asarray(3.0, dtype=self.floatX))
s = stack([a, b, c])
want = numpy.array([1, 2, 3])
out = self.eval_outputs_and_check_vector([s], opt.MakeVector())
self.assertTrue((out == want).all())
def test_stack_scalar(self):
a = self.shared(numpy.asarray(1., dtype=self.floatX))
b = as_tensor_variable(2.)
c = as_tensor_variable(3.)
s = stack([a, b, c])
want = numpy.array([1, 2, 3])
out = self.eval_outputs_and_check_vector([s])
self.assertTrue((out == want).all())
def test_stack_scalar_make_vector(self):
"""Test that calling stack() on scalars instantiates MakeVector,
        not Join. Test that the floatX dtype stays floatX and is not
        downcast to int64."""
a = tensor.scalar('a', dtype=self.floatX)
b = tensor.scalar('b', dtype=self.floatX)
s = stack([a, b, a, b])
f = function([a, b], s, mode=self.mode)
val = f(1, 2)
# print val
self.assertTrue(numpy.all(val == [1, 2, 1, 2]))
topo = f.maker.fgraph.toposort()
assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
assert f.maker.fgraph.outputs[0].dtype == self.floatX
def test_stack_scalar_make_vector_dtype(self):
'''Test that calling stack() on scalars instantiates MakeVector,
        even when the scalars don't have the same dtype.'''
a = tensor.iscalar('a')
b = tensor.lscalar('b')
s = stack([a, b, a, b])
f = function([a, b], s, mode=self.mode)
val = f(1, 2)
self.assertTrue(numpy.all(val == [1, 2, 1, 2]))
topo = f.maker.fgraph.toposort()
assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
assert f.maker.fgraph.outputs[0].dtype == 'int64'
def test_stack_scalar_make_vector_constant(self):
'''Test that calling stack() on scalars instantiates MakeVector,
        even when the scalars are plain Python ints.'''
a = tensor.iscalar('a')
b = tensor.lscalar('b')
# test when the constant is the first element.
# The first element is used in a special way
s = stack([10, a, b, numpy.int8(3)])
f = function([a, b], s, mode=self.mode)
val = f(1, 2)
self.assertTrue(numpy.all(val == [10, 1, 2, 3]))
topo = f.maker.fgraph.toposort()
assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
assert f.maker.fgraph.outputs[0].dtype == 'int64'
def test_stack_new_interface(self):
"""Test the new numpy-like interface: stack(tensors, axis=0)."""
# Testing against old interface
warnings.simplefilter('always', DeprecationWarning)
a = tensor.imatrix('a')
b = tensor.imatrix('b')
s1 = stack(a, b)
s2 = stack([a, b])
f = function([a, b], [s1, s2], mode=self.mode)
v1, v2 = f([[1, 2]], [[3, 4]])
self.assertTrue(v1.shape == v2.shape)
self.assertTrue(numpy.all(v1 == v2))
# Testing axis parameter
s3 = stack([a, b], 1)
f = function([a, b], s3, mode=self.mode)
v3 = f([[1, 2]], [[3, 4]])
v4 = numpy.array([[[1, 2], [3, 4]]])
self.assertTrue(v3.shape == v4.shape)
self.assertTrue(numpy.all(v3 == v4))
# Testing negative axis
v1 = [[1, 2, 3], [4, 5, 6]]
v2 = [[7, 8, 9], [10, 11, 12]]
s = stack([a, b], axis=-1)
f = function([a, b], s, mode=self.mode)
v = numpy.zeros((2, 3, 2))
        v[:, :, 0] = v1
        v[:, :, 1] = v2
out = f(v1, v2)
self.assertTrue(v.shape == out.shape)
self.assertTrue(numpy.all(v == out))
s = stack([a, b], axis=-2)
f = function([a, b], s, mode=self.mode)
v = numpy.zeros((2, 2, 3))
        v[:, 0, :] = v1
        v[:, 1, :] = v2
out = f(v1, v2)
self.assertTrue(v.shape == out.shape)
self.assertTrue(numpy.all(v == out))
# Testing out-of-bounds axis
self.assertRaises(IndexError, stack, [a, b], 4)
self.assertRaises(IndexError, stack, [a, b], -4)
        # Testing deprecation warning
with warnings.catch_warnings(record=True) as w:
s = stack(a, b)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
with warnings.catch_warnings(record=True) as w:
s = stack([a, b])
s = stack([a, b], 1)
s = stack([a, b], axis=1)
s = stack(tensors=[a, b])
s = stack(tensors=[a, b], axis=1)
assert not w
def test_stack_hessian(self):
# Test the gradient of stack when used in hessian, see gh-1589
a = tensor.dvector('a')
b = tensor.dvector('b')
A = stack([a, b])
B = A.T.dot(A)
Ha, Hb = hessian(B.sum(), [a, b])
# Try some values
a_v = numpy.random.rand(4)
b_v = numpy.random.rand(4)
f = theano.function([a, b], [Ha, Hb])
Ha_v, Hb_v = f(a_v, b_v)
# The Hessian is always a matrix full of 2
assert Ha_v.shape == (4, 4)
assert Hb_v.shape == (4, 4)
assert numpy.allclose(Ha_v, 2.)
assert numpy.allclose(Hb_v, 2.)
def test_stack_hessian2(self):
# Test the hessian macro when the gradient itself does not depend
# on the input (but the cost does)
a = tensor.dvector('a')
b = tensor.dvector('b')
A = stack([a, b])
Ha, Hb = hessian(A.sum(), [a, b])
# Try some values
a_v = numpy.random.rand(4)
b_v = numpy.random.rand(4)
f = theano.function([a, b], [Ha, Hb])
Ha_v, Hb_v = f(a_v, b_v)
# The Hessian is always a matrix full of 0
assert Ha_v.shape == (4, 4)
assert Hb_v.shape == (4, 4)
assert numpy.allclose(Ha_v, 0.)
assert numpy.allclose(Hb_v, 0.)
def test_join_concatenate_one_element(self):
        '''Fast test of concatenate, as it is an alias for join.
        Also test that the Join op is removed when there is only 1 input.'''
m = tensor.fmatrix()
c = tensor.concatenate([m])
f = theano.function(inputs=[m], outputs=[c],
mode=self.mode.including('local_join_1'))
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, DeepCopyOp)
def test_join_vector(self):
a = self.shared(numpy.array([1, 2, 3], dtype=self.floatX))
b = as_tensor_variable(numpy.array([7, 8, 9], dtype=self.floatX))
s = join(0, a, b)
want = numpy.array([1, 2, 3, 7, 8, 9])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
def test_roll(self):
        for get_shift in [lambda a: a, lambda x: theano.shared(x)]:
# Test simple 1D example
a = self.shared(numpy.array([1, 2, 3, 4, 5, 6], dtype=self.floatX))
b = roll(a, get_shift(2))
want = numpy.array([5, 6, 1, 2, 3, 4])
out = theano.function([], b)()
assert (out == want).all()
# Test simple 1D example with explicit 0 axis
b = roll(a, get_shift(-1), 0)
want = numpy.array([2, 3, 4, 5, 6, 1])
out = theano.function([], b)()
assert (out == want).all()
# Test 2D example - ensure that behavior matches numpy.roll behavior
a = self.shared(numpy.arange(21).reshape((3, 7)).astype(self.floatX))
b = roll(a, get_shift(-2), 1)
want = numpy.roll(a.get_value(borrow=True), -2, 1)
out = theano.function([], b)()
assert (out == want).all()
# Test rolling on axis 0
want = numpy.roll(a.get_value(borrow=True), -2, 0)
b = roll(a, get_shift(-2), 0)
out = theano.function([], b)()
assert (out == want).all()
# Test rolling on default axis with ndim > 1
want = numpy.roll(a.get_value(borrow=True), 2)
b = roll(a, get_shift(2))
out = theano.function([], b)()
assert (out == want).all()
# Test rolling on axis 0 with a positive shift that is
# larger than axis size
want = numpy.roll(a.get_value(borrow=True), 4, 0)
b = roll(a, get_shift(4), 0)
out = theano.function([], b)()
assert (out == want).all()
# Test rolling on axis 0 with a negative shift that is
# larger than axis size
want = numpy.roll(a.get_value(borrow=True), -4, 0)
b = roll(a, get_shift(-4), 0)
out = theano.function([], b)()
assert (out == want).all()
def test_stack_vector(self):
a = self.shared(numpy.array([1, 2, 3], dtype=self.floatX))
b = as_tensor_variable(numpy.array([7, 8, 9], dtype=self.floatX))
s = stack([a, b])
want = numpy.array([[1, 2, 3], [7, 8, 9]])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
def test_join_matrix0(self):
a = self.shared(numpy.array([[1, 2, 3], [4, 5, 6]],
dtype=self.floatX))
b = as_tensor_variable(numpy.array([[7, 8, 9]], dtype=self.floatX))
s = join(0, a, b)
want = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
def test_join_matrix1(self):
av = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype='float32')
bv = numpy.array([[.7], [.8]], dtype='float32')
a = self.shared(av)
b = as_tensor_variable(bv)
s = join(1, a, b)
want = numpy.array([[.1, .2, .3, .7], [.4, .5, .6, .8]],
dtype='float32')
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
utt.verify_grad(lambda a, b: join(1, a, b), [av, bv],
mode=self.mode)
def test_join_matrix_dtypes(self):
if "float32" in self.shared.__name__:
raise SkipTest(
"The shared variable constructor"
" need to support other dtype then float32")
        # Test mixed dtypes. There was a bug that caused a crash in the past.
av = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='int8')
bv = numpy.array([[7], [8]], dtype='float32')
a = self.shared(av)
b = as_tensor_variable(bv)
s = join(1, a, b)
want = numpy.array([[1, 2, 3, 7], [4, 5, 6, 8]], dtype='float32')
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
grad(s.sum(), b)
grad(s.sum(), a)
utt.verify_grad(lambda b: join(1, a, b), [bv],
eps=1.0e-2, mode=self.mode)
def test_join_matrix_ints(self):
if "float32" in self.shared.__name__:
raise SkipTest(
"The shared variable constructor"
" need to support other dtype then float32")
        # Test mixed dtypes. There was a bug that caused a crash in the past.
av = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='int8')
bv = numpy.array([[7], [8]], dtype='int32')
a = self.shared(av)
b = as_tensor_variable(bv)
s = join(1, a, b)
want = numpy.array([[1, 2, 3, 7], [4, 5, 6, 8]], dtype='float32')
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
assert (numpy.asarray(grad(s.sum(), b).eval()) == 0).all()
assert (numpy.asarray(grad(s.sum(), a).eval()) == 0).all()
def test_join_matrix1_using_vertical_stack(self):
a = self.shared(numpy.array([[1, 2, 3], [4, 5, 6]], dtype=self.floatX))
b = as_tensor_variable(numpy.array([[7, 8, 9]], dtype=self.floatX))
c = as_tensor_variable(numpy.array([[9, 8, 7]], dtype=self.floatX))
s = vertical_stack(a, b, c)
want = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [9, 8, 7]])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
def test_join_matrix1_using_horizontal_stack(self):
av = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype='float32')
bv = numpy.array([[.7], [.8]], dtype='float32')
cv = numpy.array([[.3, .2, .1], [.6, .5, .4]], dtype='float32')
a = self.shared(av)
b = as_tensor_variable(bv)
c = as_tensor_variable(cv)
s = horizontal_stack(a, b, c)
want = numpy.array([[.1, .2, .3, .7, .3, .2, .1],
[.4, .5, .6, .8, .6, .5, .4]],
dtype='float32')
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
utt.verify_grad(lambda a, b: join(1, a, b), [av, bv],
mode=self.mode)
def test_join_matrixV(self):
"""variable join axis"""
v = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype=self.floatX)
a = self.shared(v)
b = as_tensor_variable(v)
ax = lscalar()
s = join(ax, a, b)
f = inplace_func([ax], [s], mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
want = numpy.array([[.1, .2, .3], [.4, .5, .6],
[.1, .2, .3], [.4, .5, .6]])
got = f(0)
assert numpy.allclose(got, want)
want = numpy.array([[.1, .2, .3, .1, .2, .3],
[.4, .5, .6, .4, .5, .6]])
got = f(1)
assert numpy.allclose(got, want)
utt.verify_grad(lambda a, b: join(0, a, b), [v, 2 * v], mode=self.mode)
utt.verify_grad(lambda a, b: join(1, a, b), [v, 2 * v], mode=self.mode)
def test_join_matrixV_negative_axis(self):
"""variable join negative axis"""
v = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype=self.floatX)
a = self.shared(v)
b = as_tensor_variable(v)
ax = lscalar()
s = join(ax, a, b)
f = inplace_func([ax], [s], mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
want = numpy.array([[.1, .2, .3, .1, .2, .3],
[.4, .5, .6, .4, .5, .6]])
got = f(-1)
assert numpy.allclose(got, want)
want = numpy.array([[.1, .2, .3], [.4, .5, .6],
[.1, .2, .3], [.4, .5, .6]])
got = f(-2)
assert numpy.allclose(got, want)
self.assertRaises(IndexError, f, -3)
def test_join_matrixC_negative_axis(self):
"""constant join negative axis"""
v = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype=self.floatX)
a = self.shared(v)
b = as_tensor_variable(v)
s = join(-1, a, b)
f = theano.function([], [s], mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
want = numpy.array([[.1, .2, .3, .1, .2, .3],
[.4, .5, .6, .4, .5, .6]])
got = f()
assert numpy.allclose(got, want)
s = join(-2, a, b)
f = theano.function([], [s], mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
want = numpy.array([[.1, .2, .3], [.4, .5, .6],
[.1, .2, .3], [.4, .5, .6]])
got = f()
assert numpy.allclose(got, want)
self.assertRaises(IndexError, join, -3, a, b)
utt.verify_grad(lambda a, b: join(-1, a, b), [v, 2 * v],
mode=self.mode)
def test_vector_len(self):
x = lscalar('x')
y = dscalar('y')
triple = as_tensor_variable((x, y, 9.0))
assert 3 == get_vector_length(triple)
a, b, c = triple
f = function([x, y], [b, c, a], mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo if isinstance(node.op, opt.MakeVector)]
assert numpy.allclose(f(4, 5), [5, 9, 4])
def test_broadcastable_flag_assignment_mixed_otheraxes(self):
"""
Test that the broadcastable flags for the output of
a join operation on non-join axes are True if one or
        more inputs are broadcastable on that dimension.
"""
rng = numpy.random.RandomState(seed=utt.fetch_seed())
a_val = rng.rand(1, 4, 1).astype(self.floatX)
b_val = rng.rand(1, 3, 1).astype(self.floatX)
a = self.shared(a_val, broadcastable=(False, False, True))
b = self.shared(b_val, broadcastable=(True, False, True))
c = self.join_op(1, a, b)
assert c.type.broadcastable[0] and c.type.broadcastable[2]
assert not c.type.broadcastable[1]
        # The optimizer can replace the int with a Theano constant
c = self.join_op(theano.tensor.constant(1), a, b)
assert c.type.broadcastable[0] and c.type.broadcastable[2]
assert not c.type.broadcastable[1]
        # In case future optimizations insert other useless stuff
c = self.join_op(theano.tensor.cast(theano.tensor.constant(1),
dtype="int32"),
a, b)
assert c.type.broadcastable[0] and c.type.broadcastable[2]
assert not c.type.broadcastable[1]
f = function([], c, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
f()
utt.verify_grad((lambda a, b: join(1, a, b)), [a_val, b_val], rng=rng,
mode=self.mode)
# Should raise an error if dimension 0 does not match
a.set_value(rng.rand(2, 4, 1).astype(self.floatX))
self.assertRaises(ValueError, f)
def test_broadcastable_flag_assignment_mixed_thisaxes(self):
"""
Test that the broadcastable flag of the join axis
is False when some inputs are broadcastable on that
dimension.
"""
rng = numpy.random.RandomState(seed=utt.fetch_seed())
a_val = rng.rand(2, 4, 1).astype(self.floatX)
b_val = rng.rand(1, 4, 1).astype(self.floatX)
a = self.shared(a_val, broadcastable=(False, False, True))
b = self.shared(b_val, broadcastable=(True, False, True))
c = self.join_op(0, a, b)
assert not c.type.broadcastable[0]
f = function([], c, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
f()
utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng,
mode=self.mode)
# Should raise an error if b_val.shape[0] is not 1
        # We can't set the value.
self.assertRaises(TypeError, b.set_value,
rng.rand(3, 4, 1).astype(self.floatX))
a = TensorType(dtype=self.floatX, broadcastable=[0, 0, 1])()
b = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1])()
c = self.join_op(0, a, b)
f = function([a, b], c, mode=self.mode)
bad_b_val = rng.rand(3, 4, 1).astype(self.floatX)
self.assertRaises(TypeError, f, a_val, bad_b_val)
def test_broadcastable_flags_all_broadcastable_on_joinaxis(self):
"""
Test that joining together several inputs which are all
broadcastable on the join dimension results in the output
being non-broadcastable on the join dimension.
"""
rng = numpy.random.RandomState(seed=utt.fetch_seed())
a_val = rng.rand(1, 4, 1).astype(self.floatX)
b_val = rng.rand(1, 4, 1).astype(self.floatX)
a = self.shared(a_val, broadcastable=(True, False, True))
b = self.shared(b_val, broadcastable=(True, False, True))
c = self.join_op(0, a, b)
assert not c.type.broadcastable[0]
f = function([], c, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
f()
utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng,
mode=self.mode)
def test_broadcastable_single_input_broadcastable_dimension(self):
# Test that all broadcastable flags are preserved by a
# single-input join.
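        # e.g. join(0, a) with a.broadcastable == (True, False, True)
        # should keep broadcastable == (True, False, True), as asserted
        # below.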
rng = numpy.random.RandomState(seed=utt.fetch_seed())
a_val = rng.rand(1, 4, 1).astype(self.floatX)
a = self.shared(a_val, broadcastable=(True, False, True))
b = self.join_op(0, a)
assert b.type.broadcastable[0]
assert b.type.broadcastable[2]
assert not b.type.broadcastable[1]
f = function([], b, mode=self.mode)
topo = f.maker.fgraph.toposort()
if theano.config.mode != 'FAST_COMPILE':
assert not [True for node in topo
if isinstance(node.op, type(self.join_op))]
f()
utt.verify_grad((lambda a: join(0, a)), [a_val], rng=rng,
mode=self.mode)
# Should raise an error if length of dimension 0 is not 1
self.assertRaises(TypeError, a.set_value,
rng.rand(2, 4, 1).astype(self.floatX))
#self.assertRaises(TypeError, f, bad_a_val)
def test_broadcastable_flags_many_dims_and_inputs(self):
# Test that the right broadcastable flags get set for a join
# with many inputs and many input dimensions.
a = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1, 0, 0, 0])()
b = TensorType(dtype=self.floatX, broadcastable=[1, 1, 1, 0, 0, 0])()
c = TensorType(dtype=self.floatX, broadcastable=[1, 0, 0, 0, 0, 0])()
d = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1, 1, 0, 1])()
e = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1, 0, 0, 1])()
f = self.join_op(0, a, b, c, d, e)
fb = f.type.broadcastable
assert not fb[0] and fb[1] and fb[2] and fb[3] and not fb[4] and fb[5]
g = self.join_op(1, a, b, c, d, e)
gb = g.type.broadcastable
assert gb[0] and not gb[1] and gb[2] and gb[3] and not gb[4] and gb[5]
h = self.join_op(4, a, b, c, d, e)
hb = h.type.broadcastable
assert hb[0] and hb[1] and hb[2] and hb[3] and not hb[4] and hb[5]
f = function([a, b, c, d, e], f, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [True for node in topo
if isinstance(node.op, type(self.join_op))]
rng = numpy.random.RandomState(seed=utt.fetch_seed())
a_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
b_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
c_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
d_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
e_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
f(a_val, b_val, c_val, d_val, e_val)
utt.verify_grad((lambda a, b, c, d, e: join(0, a, b, c, d, e)),
[a_val, b_val, c_val, d_val, e_val], rng=rng,
mode=self.mode)
# Should raise an error if length of dimension 0 is not 1
bad_val = rng.rand(2, 1, 1, 1, 2, 1).astype(self.floatX)
self.assertRaises(TypeError, f, bad_val, b_val, c_val, d_val, e_val)
self.assertRaises(TypeError, f, a_val, bad_val, c_val, d_val, e_val)
self.assertRaises(TypeError, f, a_val, b_val, bad_val, d_val, e_val)
self.assertRaises(TypeError, f, a_val, b_val, c_val, bad_val, e_val)
self.assertRaises(TypeError, f, a_val, b_val, c_val, d_val, bad_val)
# Should raise an error if any dimension other than 4 has length != 1
bad_a_val = rng.rand(1, 2, 1, 1, 2, 1).astype(self.floatX)
bad_b_val = rng.rand(1, 1, 1, 1, 2, 2).astype(self.floatX)
bad_c_val = rng.rand(1, 1, 2, 1, 2, 1).astype(self.floatX)
bad_d_val = rng.rand(1, 2, 1, 1, 2, 1).astype(self.floatX)
bad_e_val = rng.rand(1, 1, 1, 2, 2, 1).astype(self.floatX)
self.assertRaises(ValueError, f, bad_a_val, b_val, c_val, d_val, e_val)
self.assertRaises(ValueError, f, a_val, bad_b_val, c_val, d_val, e_val)
self.assertRaises(ValueError, f, a_val, b_val, bad_c_val, d_val, e_val)
self.assertRaises(ValueError, f, a_val, b_val, c_val, bad_d_val, e_val)
self.assertRaises(ValueError, f, a_val, b_val, c_val, d_val, bad_e_val)
def test_infer_shape_join(self):
def get_mat(s1, s2):
return numpy.asarray(numpy.random.uniform(size=(s1, s2)),
dtype=self.floatX)
x1 = self.shared(get_mat(3, 4))
x2 = self.shared(get_mat(2, 4))
x3 = self.shared(get_mat(1, 4))
# Test dim 0
z = self.join_op(0, x1, x2, x3)
f = theano.function([], z.shape, mode=self.mode)
topo = f.maker.fgraph.toposort()
out = f()
assert (out == [6, 4]).all()
if theano.config.mode != 'FAST_COMPILE':
for node in f.maker.fgraph.toposort():
assert not isinstance(node.op, type(self.join_op))
# Test dim 1
z = self.join_op(1, x1, x2, x3)
f = theano.function([], z.shape, mode=self.mode)
topo = f.maker.fgraph.toposort()
x1.set_value(get_mat(3, 4))
x2.set_value(get_mat(3, 4))
x3.set_value(get_mat(3, 5))
out = f()
assert (out == [3, 13]).all()
if theano.config.mode != 'FAST_COMPILE':
for node in topo:
assert not isinstance(node.op, type(self.join_op))
# Test hide error
x1.set_value(get_mat(3, 4))
x2.set_value(get_mat(3, 4))
x3.set_value(get_mat(2, 5))
if not self.hide_error:
self.assertRaises(ValueError, f)
else:
f()
def test_rebroadcast(self):
# Regression test for a crash that used to happen when rebroadcasting.
x = tensor.TensorType(self.floatX, [False, False, True])()
u = tensor.TensorType(self.floatX, [False, False, True])()
# This line used to crash.
z = tensor.concatenate([x, -u], axis=2)
def test_concatenate_same(self):
        # Test that we can concatenate the same tensor multiple times.
# In the past it was broken on the GPU.
rng = numpy.random.RandomState(seed=utt.fetch_seed())
T_shared = self.shared(rng.rand(3, 4).astype(self.floatX))
Tout = tensor.concatenate([T_shared, T_shared])
f = function([], Tout, mode=self.mode)
out = f()
if theano.config.mode != 'FAST_COMPILE':
assert [True for node in f.maker.fgraph.toposort()
if isinstance(node.op, type(self.join_op))]
assert numpy.allclose(out,
numpy.concatenate([T_shared.get_value(),
T_shared.get_value()]))
def test_mixed_ndim_error(self):
rng = numpy.random.RandomState(seed=utt.fetch_seed())
v = self.shared(rng.rand(4).astype(self.floatX))
m = self.shared(rng.rand(4, 4).astype(self.floatX))
self.assertRaises(TypeError, self.join_op, 0, v, m)
def test_split_0elem(self):
rng = numpy.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype(self.floatX))
o = self.split_op_class(2)(m, 0, [4, 0])
f = function([], o, mode=self.mode)
assert any([isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()])
o1, o2 = f()
assert numpy.allclose(o1, m.get_value(borrow=True))
assert numpy.allclose(o2, m.get_value(borrow=True)[4:])
def test_split_neg(self):
rng = numpy.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype(self.floatX))
o = self.split_op_class(2)(m, 0, [5, -1])
f = function([], o, mode=self.mode)
assert any([isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()])
self.assertRaises(ValueError, f)
class test_comparison(unittest.TestCase):
"""Test <, >, <=, >=, == and !=
    Test that we can do the comparison with different
    combinations of tensor (shared and constant variable) and
    ndarray. ndarray cmp tensor used to crash. A NumPy PR (expected
    in the NumPy 1.8 release) makes it work, so we assert that it either
    works (future behaviour) or raises an error (current NumPy release).
"""
def setUp(self):
utt.seed_rng()
self.mode = None
self.shared = shared
self.dtypes = ['float64', 'float32', 'complex64', 'complex128']
def inplace_func(self, inputs, outputs, check_isfinite=None):
mode = self.mode
if check_isfinite is False:
if mode is None:
mode = get_default_mode()
mode.check_isfinite = False
f = inplace_func(inputs, outputs, mode=mode)
return f
def test_gt(self):
for dtype in self.dtypes:
l = numpy.asarray([0., -1., 1.], dtype=dtype)
r = numpy.asarray([0., 1., -1.], dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(tensor.constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), tensor.constant(r), False),
]:
try:
fn = self.inplace_func([], x > y)
v = fn()
self.assertTrue(numpy.all(v == (l > r)), (v, (l > r)))
except TypeError:
assert err
def test_lt(self):
for dtype in self.dtypes:
l = numpy.asarray([0., -1., 1.], dtype=dtype)
r = numpy.asarray([0., 1., -1.], dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(tensor.constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), tensor.constant(r), False),
]:
try:
fn = self.inplace_func([], x < y)
v = fn()
self.assertTrue(numpy.all(v == (l < r)), (v, (l < r)))
except TypeError:
assert err
def test_le(self):
for dtype in self.dtypes:
l = numpy.asarray([0., -1., 1.], dtype=dtype)
r = numpy.asarray([0., 1., -1.], dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(tensor.constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), tensor.constant(r), False),
]:
try:
fn = self.inplace_func([], x <= y)
v = fn()
self.assertTrue(numpy.all(v == (l <= r)), (v, (l <= r)))
except TypeError:
assert err
def test_ge(self):
for dtype in self.dtypes:
l = numpy.asarray([0., -1., 1.], dtype=dtype)
r = numpy.asarray([0., 1., -1.], dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(tensor.constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), tensor.constant(r), False),
]:
try:
fn = self.inplace_func([], x >= y)
v = fn()
self.assertTrue(numpy.all(v == (l >= r)), (v, (l >= r)))
except TypeError:
assert err
def test_eq(self):
for dtype in self.dtypes:
l = numpy.asarray([0., -1., 1.], dtype=dtype)
r = numpy.asarray([0., 1., -1.], dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(tensor.constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), tensor.constant(r), False),
]:
try:
fn = self.inplace_func([], eq(x, y))
v = fn()
self.assertTrue(numpy.all(v == (l == r)), (v, (l == r)))
except TypeError:
assert err
def test_neq(self):
for dtype in self.dtypes:
l = numpy.asarray([0., -1., 1.], dtype=dtype)
r = numpy.asarray([0., 1., -1.], dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(tensor.constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), tensor.constant(r), False),
]:
try:
fn = self.inplace_func([], neq(x, y))
v = fn()
self.assertTrue(numpy.all(v == (l != r)), (v, (l != r)))
except TypeError:
assert err
def test_isclose(self):
for dtype in self.dtypes:
l = numpy.asarray(
[0., 1., -1., 0.,
numpy.nan, numpy.inf, -numpy.inf, numpy.inf],
dtype=dtype)
r = numpy.asarray(
[0., 1.0001, -1.000000000001, numpy.nan,
numpy.nan, numpy.inf, numpy.inf, 0.],
dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), constant(r), False),
]:
try:
o1 = isclose(x, y, equal_nan=False)
fn1 = self.inplace_func([], o1, check_isfinite=False)
o2 = isclose(x, y, equal_nan=True)
fn2 = self.inplace_func([], o2, check_isfinite=False)
v1 = fn1()
v2 = fn2()
self.assertTrue(
numpy.all(
v1 == numpy.asarray(
[True, False, True, False,
False, True, False, False],
dtype="bool"
)
),
numpy.all(
v2 == numpy.asarray(
[True, False, True, False,
True, True, False, False],
dtype="bool"
)
)
)
except TypeError:
if not dtype.startswith('complex'):
raise
assert err
def test_allclose(self):
        # The equal_nan argument is not in the current version of
        # numpy.allclose, so force it to False.
for dtype in self.dtypes:
l = numpy.asarray(
[0., 1., -1., 0.,
numpy.nan, numpy.inf, -numpy.inf, numpy.inf],
dtype=dtype)
r = numpy.asarray(
[0., 1.0001, -1.000000000001, numpy.nan,
numpy.nan, numpy.inf, numpy.inf, 0.],
dtype=dtype)
for x, y, err in [
(self.shared(l.astype(dtype)),
self.shared(r.astype(dtype)), False),
(l, self.shared(r.astype(dtype)), True),
(constant(l), self.shared(r.astype(dtype)), False),
(self.shared(l.astype(dtype)), r, False),
(self.shared(l.astype(dtype)), constant(r), False),
]:
try:
fn = self.inplace_func([], allclose(x, y, equal_nan=False),
check_isfinite=False)
v = fn()
self.assertTrue(numpy.all(v == numpy.allclose(l, r)))
except TypeError:
if not dtype.startswith('complex'):
assert err
class test_bitwise(unittest.TestCase):
dtype = ['int8', 'int16', 'int32', 'int64', ]
def test_or(self):
for dtype in self.dtype:
x, y = vector(dtype=dtype), vector(dtype=dtype)
fn = inplace_func([x, y], x | y)
l = theano._asarray([0, 0, 1, 1], dtype=dtype)
r = theano._asarray([0, 1, 0, 1], dtype=dtype)
v = fn(l, r)
self.assertTrue(numpy.all(v == (operator.or_(l, r))), (l, r, v))
def test_xor(self):
for dtype in self.dtype:
x, y = vector(dtype=dtype), vector(dtype=dtype)
fn = inplace_func([x, y], x ^ y)
ix = x
ix = inplace.xor_inplace(ix, y)
gn = inplace_func([x, y], ix)
l = theano._asarray([0, 0, 1, 1], dtype=dtype)
r = theano._asarray([0, 1, 0, 1], dtype=dtype)
v = fn(l, r)
self.assertTrue(numpy.all(v == (operator.xor(l, r))), (l, r, v))
v = gn(l, r)
# test the in-place stuff
self.assertTrue(numpy.all(l == numpy.asarray([0, 1, 1, 0])), l)
def test_and(self):
for dtype in self.dtype:
x, y = vector(dtype=dtype), vector(dtype=dtype)
fn = inplace_func([x, y], x & y)
l = theano._asarray([0, 0, 1, 1], dtype=dtype)
r = theano._asarray([0, 1, 0, 1], dtype=dtype)
v = fn(l, r)
self.assertTrue(numpy.all(v == (operator.and_(l, r))), (l, r, v))
def test_inv(self):
for dtype in self.dtype:
x = vector(dtype=dtype)
fn = inplace_func([x], ~x)
for l in [[0, 0, 1, 1], [0, 1, 0, 1],
[0, 0, 1, 1], [0, 1, 0, 1],
[-1, 2 ** 16, 2 ** 16 - 1]
]:
l = theano._asarray([0, 0, 1, 1], dtype=dtype)
v = fn(l)
self.assertTrue(numpy.all(v == (~l)), (l, v))
def test_eye(self):
n = iscalar()
m = iscalar()
k = iscalar()
fn = theano.function([m, n, k], eye(m, n, k))
self.assertTrue(numpy.all(fn(5, 6, 1) == numpy.eye(5, 6, 1)))
class T_add(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test_complex_all_ops(self):
for nbits in (64, 128):
a = shared(numpy.ones(3, dtype='complex%i' % nbits) + 0.5j)
b = shared(numpy.ones(3, dtype='complex%i' % nbits) + 1.5j)
tests = (("+", lambda x, y: x + y),
("-", lambda x, y: x - y),
("*", lambda x, y: x * y),
("/", lambda x, y: x / y))
for s, fn in tests:
f = inplace_func([], fn(a, b))
# print 'valid output:', fn(a.data, b.data)
# print 'theano output:', f(a.data, b.data)
self.assertTrue(a.type.values_eq_approx(fn(
a.get_value(), b.get_value()), f()))
def test_grad_scalar_l(self):
utt.verify_grad(add, [numpy.asarray([3.0]), rand(3)])
def test_grad_scalar_r(self):
utt.verify_grad(add, [rand(3), numpy.asarray([3.0])])
def test_grad_row(self):
utt.verify_grad(add, [rand(3, 5), rand(1, 5)])
def test_grad_col(self):
utt.verify_grad(add, [rand(3, 5), rand(3, 1)])
class T_ceil(unittest.TestCase):
def test_complex(self):
self.assertRaises(TypeError, tensor.ceil, tensor.zvector())
class T_exp(unittest.TestCase):
def test_grad_0(self):
utt.verify_grad(exp, [
numpy.asarray([[1.5089518, 1.48439076, -4.7820262],
[2.04832468, 0.50791564, -1.58892269]])])
def test_grad_1(self):
utt.verify_grad(inplace.exp_inplace, [
numpy.asarray([[1.5089518, 1.48439076, -4.7820262],
[2.04832468, 0.50791564, -1.58892269]])])
def test_int(self):
x = ivector()
f = function([x], exp(x))
exp_3 = f([3])
assert exp_3.dtype == 'float64'
def test_complex(self):
x = zvector()
assert exp(x).dtype == 'complex128'
f = function([x], exp(x))
exp_3 = f([3 + 2j])
assert numpy.allclose(exp_3, numpy.exp(3 + 2j))
class T_divimpl(unittest.TestCase):
def test_impls(self):
i = iscalar()
ii = lscalar()
d = dscalar()
f = fscalar()
c = cscalar()
assert numpy.allclose(function([i, d], i / d)(5, 7.0), (5.0 / 7.0))
assert numpy.allclose(function([i, d], d / i)(5, 7.0), (7.0 / 5.0))
assert numpy.allclose(function([i, f], i / f)(5, 11.0), (5.0 / 11.0))
assert numpy.allclose(function([i, f], f / i)(5, 11.0), (11.0 / 5.0))
assert numpy.allclose(function([i, ii], i // ii)(5, 3), (5 // 3))
assert numpy.allclose(function([i, ii], ii // i)(5, 3), (3 // 5))
assert numpy.allclose(function([i, ii], true_div(i, ii))(5, 3),
(5. / 3.))
assert numpy.allclose(function([i, ii], true_div(ii, i))(5, 3),
(3. / 5.))
assert numpy.allclose(function([i, c], i / c)(5, numpy.complex(5, 3)),
(5. / (5 + 3j)))
assert numpy.allclose(function([i, c], c / i)(5, numpy.complex(5, 3)),
((5 + 3j) / 5.))
class T_mean(unittest.TestCase):
def test_regression_mean_of_ndarray_failure(self):
try:
tensor.mean(numpy.zeros(1))
except AttributeError:
self.fail()
def test0(self):
# Simple test...
x = tensor.vector()
f = theano.function([x], tensor.mean(x))
data = rand(50)
assert numpy.allclose(f(data), numpy.mean(data))
def test_list(self):
ll = [theano.shared(0.), theano.shared(2.)]
        assert tensor.mean(ll).eval() == 1
class test_matinv(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def mat_reciprocal(self, dim):
# symbolic program
        # broadcastable=[False, False] means that the matrix is two-dimensional,
        # and none of the dimensions are constrained to have length 1.
# Note that TensorType's constructor does not actually allocate any memory.
# TODO: Make TensorType syntax more explicit, and maybe give shape or number of dimensions.
utt.seed_rng()
a, b = matrices('ab')
ab = a * b
# Here, as_tensor_variable actually uses the data allocated by numpy.
diff = ab - as_tensor_variable(numpy.ones((dim, dim),
dtype=config.floatX))
# Sum of squared errors
ssdiff = sum((diff ** 2.0))
g_b = grad(ssdiff, b)
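        # Analytically (a sketch, matching the hand-coded check in
        # test_reciprocal below): d(ssdiff)/db = 2 * (a * b - 1) * a,
        # computed elementwise since a * b is an elementwise product.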
# compilation to function
# [a,b] are the inputs, [ssdiff,g_b] are the outputs
fn = inplace_func([a, b], [ssdiff, g_b])
# use the function
x = rand(dim, dim) + 0.1 # Initialized s.t. x is not too tiny
w = rand(dim, dim)
x = numpy.asarray(x, dtype=config.floatX)
w = numpy.asarray(w, dtype=config.floatX)
for i in xrange(100):
ssd, gw = fn(x, w)
# print ssd, x*w, x, w
if i == 0:
ssd0 = ssd
w -= 0.4 * gw
return ssd0, ssd
def test_reciprocal(self):
"""Matrix reciprocal by gradient descent"""
ssd0, ssd = self.mat_reciprocal(3)
utt.seed_rng()
# hand-coded numpy implementation for verification
x = rand(3, 3) + 0.1
w = rand(3, 3)
x = numpy.asarray(x, dtype=config.floatX)
w = numpy.asarray(w, dtype=config.floatX)
ones = numpy.ones((3, 3), dtype=config.floatX)
myssd0 = numpy.sum((x * w - ones) ** 2.0)
        # We want at least one test that is not too fast, so we make one here.
for i in xrange(100):
            gw = 2 * (x * w - ones) * x  # gradient dSSD/dw
myssd = numpy.sum((x * w - ones) ** 2)
w -= 0.4 * gw
self.assertAlmostEqual(ssd0, myssd0)
self.assertAlmostEqual(ssd, myssd)
class t_dot(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def cmp_dot(self, x, y):
# x, y are matrices or numbers
def spec(x):
x = numpy.asarray(x)
return type(x), x.dtype, x.shape
nz = numpy.dot(x, y)
tz = eval_outputs([dot(as_tensor_variable(x), as_tensor_variable(y))])
self.assertTrue(tz.dtype == nz.dtype,
(tz.dtype, tz.dtype.num, nz.dtype, nz.dtype.num))
self.assertTrue(tz.shape == nz.shape, (tz.shape, nz.shape))
self.assertTrue(_approx_eq(nz, tz))
def test_Op_dims(self):
# _dot is a Dot op instance
_dot = theano.tensor.basic._dot
d0 = scalar()
d1 = vector()
d2 = matrix()
d3 = tensor3()
self.assertRaises(TypeError, _dot, d0, d0)
self.assertRaises(TypeError, _dot, d0, d1)
self.assertRaises(TypeError, _dot, d0, d2)
self.assertRaises(TypeError, _dot, d0, d3)
self.assertRaises(TypeError, _dot, d1, d0)
_dot(d1, d1)
_dot(d1, d2)
self.assertRaises(TypeError, _dot, d1, d3)
self.assertRaises(TypeError, _dot, d2, d0)
_dot(d2, d1)
_dot(d2, d2)
self.assertRaises(TypeError, _dot, d2, d3)
self.assertRaises(TypeError, _dot, d3, d0)
self.assertRaises(TypeError, _dot, d3, d1)
self.assertRaises(TypeError, _dot, d3, d2)
self.assertRaises(TypeError, _dot, d3, d3)
def test_dot_0d_0d(self):
self.cmp_dot(rand(), rand())
def test_dot_0d_1d(self):
self.cmp_dot(rand(), rand(5))
def test_dot_0d_2d(self):
self.cmp_dot(rand(), rand(6, 7))
def test_dot_0d_3d(self):
self.cmp_dot(rand(), rand(8, 6, 7))
def test_dot_1d_0d(self):
self.cmp_dot(rand(5), rand())
def test_dot_1d_1d(self):
self.cmp_dot(rand(5), rand(5))
def test_dot_1d0_1d0(self):
self.cmp_dot(rand(0), rand(0))
    # numpy raises "matrices are not aligned"...
def test_dot_1d_1d0(self):
self.assertRaises(ValueError, self.cmp_dot, rand(5), rand(0))
    # numpy raises "matrices are not aligned"...
def test_dot_1d0_1d(self):
self.assertRaises(ValueError, self.cmp_dot, rand(0), rand(5))
def test_dot_1d_2d(self):
self.cmp_dot(rand(6), rand(6, 7))
def test_dot_1d0_2d(self):
self.cmp_dot(rand(0), rand(0, 7))
def test_dot_1d_2d0(self):
self.cmp_dot(rand(6), rand(6, 0))
def test_dot_1d0_2d0(self):
self.cmp_dot(rand(0), rand(0, 0))
def test_dot_1d_3d(self):
self.cmp_dot(rand(6), rand(8, 6, 7))
def test_dot_2d_0d(self):
self.cmp_dot(rand(5, 6), rand())
def test_dot_2d_1d(self):
self.cmp_dot(rand(5, 6), rand(6))
def test_dot_2d0_1d(self):
self.cmp_dot(rand(0, 6), rand(6))
def test_dot_2d_1d0(self):
self.cmp_dot(rand(5, 0), rand(0))
def test_dot_2d0_1d0(self):
self.cmp_dot(rand(0, 0), rand(0))
def test_dot_2d_2d(self):
self.cmp_dot(rand(5, 6), rand(6, 7))
def test_dot_2d0_2d(self):
self.cmp_dot(rand(0, 6), rand(6, 7))
def test_dot_2d_2d0(self):
self.cmp_dot(rand(5, 6), rand(6, 0))
def test_dot_2d0_2d0(self):
self.cmp_dot(rand(0, 6), rand(6, 0))
def test_dot_2d_0_2d(self):
self.cmp_dot(rand(5, 0), rand(0, 7))
def test_dot_2d0_0_2d0(self):
self.cmp_dot(rand(0, 6), rand(6, 0))
def test_dot_2d_3d(self):
self.cmp_dot(rand(5, 6), rand(8, 6, 7))
def test_dot_3d_0d(self):
self.cmp_dot(rand(4, 5, 6), rand())
def test_dot_3d_1d(self):
self.cmp_dot(rand(4, 5, 6), rand(6))
def test_dot_3d_2d(self):
self.cmp_dot(rand(4, 5, 6), rand(6, 7))
def test_dot_3d_3d(self):
self.cmp_dot(rand(4, 5, 6), rand(8, 6, 7))
def not_aligned(self, x, y):
ctv_backup = config.compute_test_value
config.compute_test_value = 'off'
try:
z = dot(x, y)
finally:
config.compute_test_value = ctv_backup
# constant folding will complain to _logger that things are not aligned
# this is normal, testers are not interested in seeing that output.
_logger = logging.getLogger('theano.gof.opt')
oldlevel = _logger.level
_logger.setLevel(logging.CRITICAL)
try:
try:
tz = eval_outputs([z])
assert False # should have raised exception
except ValueError as e:
e0 = exc_message(e)
self.assertTrue(
# Reported by numpy.
e0.split()[1:4] == ['are', 'not', 'aligned'] or
# Reported by blas or Theano.
e0.split()[0:2] == ['Shape', 'mismatch:'] or
# Reported by Theano perform
e0.split()[0:4]
== ['Incompatible', 'shapes', 'for', 'gemv'] or
e)
finally:
_logger.setLevel(oldlevel)
def test_align_1_1(self):
self.not_aligned(rand(5), rand(6))
def test_align_1_2(self):
self.not_aligned(rand(5), rand(6, 4))
def test_align_1_3(self):
self.not_aligned(rand(5), rand(6, 4, 7))
def test_align_2_1(self):
self.not_aligned(rand(5, 4), rand(6))
def test_align_2_2(self):
self.not_aligned(rand(5, 4), rand(6, 7))
def test_align_2_3(self):
self.not_aligned(rand(5, 4), rand(6, 7, 8))
def test_align_3_1(self):
self.not_aligned(rand(5, 4, 3), rand(6))
def test_align_3_2(self):
self.not_aligned(rand(5, 4, 3), rand(6, 7))
def test_align_3_3(self):
self.not_aligned(rand(5, 4, 3), rand(6, 7, 8))
def test_grad(self):
utt.verify_grad(dot, [rand(2, 3), rand(3, 2)])
utt.verify_grad(dot, [rand(2), rand(2, 3)])
utt.verify_grad(dot, [rand(3, 2), rand(2)])
utt.verify_grad(dot, [rand(2), rand(2)])
utt.verify_grad(dot, [rand(), rand(2)])
utt.verify_grad(dot, [rand(), rand(2, 5)])
utt.verify_grad(dot, [rand(2), rand()])
utt.verify_grad(dot, [rand(2, 5), rand()])
utt.verify_grad(dot, [rand(2, 3, 4), rand(4)])
utt.verify_grad(dot, [rand(3), rand(2, 3, 4)])
utt.verify_grad(dot, [rand(4, 3), rand(2, 3, 4)])
utt.verify_grad(dot, [rand(2, 3, 4), rand(4, 5)])
utt.verify_grad(dot, [rand(2, 3, 4), rand(3, 4, 5)])
@attr('slow')
def test_broadcastable_patterns(self):
#
        # These examples should all work because, broadcastable or
        # not, all dimensions of all results have size 1.
#
def val_for(r):
if r.dtype.startswith('complex'):
# We want to test complex at the same time, so we give a value
                # to the imaginary component.
# This strange way of doing things is the only way that worked
# on numpy 1.4.1
if r.ndim == 0:
return numpy.asarray(numpy.complex(1.1, 2.1),
dtype=r.dtype)
if r.ndim == 1:
if r.dtype == 'complex64':
return numpy.complex64([numpy.complex(1.2, 2.2)])
elif r.dtype == 'complex128':
return numpy.complex128([numpy.complex(1.2, 2.2)])
elif r.ndim == 2:
if r.dtype == 'complex64':
return numpy.complex64([[numpy.complex(1.3, 2.3)]])
elif r.dtype == 'complex128':
return numpy.complex128([[numpy.complex(1.3, 2.3)]])
if r.ndim == 0:
return numpy.asarray(1.1, dtype=r.dtype)
if r.ndim == 1:
return numpy.asarray([1.2], dtype=r.dtype)
elif r.ndim == 2:
return numpy.asarray([[1.3]], dtype=r.dtype)
raise ValueError()
for dtype0 in ('float32', 'float64', 'complex64'):
for dtype1 in ('float32', 'complex64', 'complex128'):
for bc0 in ((True,), (False,), (True, True),
(True, False), (False, True),
(False, False)):
x = TensorType(dtype=dtype0, broadcastable=bc0)()
for bc1 in ((True,), (False,), (True, True),
(True, False), (False, True),
(False, False)):
y = TensorType(dtype=dtype1, broadcastable=bc1)()
z = dot(x, y)
t = TensorType(dtype=dtype0,
broadcastable=z.broadcastable)()
rval = z * 3 + 2 * t
f = function([x, y, t], rval)
xval = val_for(x)
yval = val_for(y)
tval = val_for(t)
f(xval, yval, tval) # debugmode checks result
if (dtype0.startswith('float') and
dtype1.startswith('float')):
g = grad(z.sum(), x)
assert g.broadcastable == x.broadcastable
g = grad(z.sum(), y)
assert g.broadcastable == y.broadcastable
class T_tensorfromscalar(unittest.TestCase):
def test0(self):
s = scal.constant(56)
t = tensor_from_scalar(s)
self.assertTrue(t.owner.op is tensor_from_scalar)
self.assertTrue(t.type.broadcastable == (), t.type.broadcastable)
self.assertTrue(t.type.ndim == 0, t.type.ndim)
self.assertTrue(t.type.dtype == s.type.dtype)
v = eval_outputs([t])
self.assertTrue(v == 56, v)
self.assertTrue(isinstance(v, numpy.ndarray))
self.assertTrue(v.shape == (), v.shape)
def test1(self):
s = scal.constant(56)
t = as_tensor_variable(s)
self.assertTrue(t.owner.op is tensor_from_scalar)
self.assertTrue(t.type.broadcastable == (), t.type.broadcastable)
self.assertTrue(t.type.ndim == 0, t.type.ndim)
self.assertTrue(t.type.dtype == s.type.dtype)
v = eval_outputs([t])
self.assertTrue(v == 56, v)
self.assertTrue(isinstance(v, numpy.ndarray))
self.assertTrue(v.shape == (), v.shape)
g = grad(t, s)
self.assertTrue(eval_outputs([g]) == 0.)
def test2(self):
s = scal.constant(56.)
t = as_tensor_variable(s)
self.assertTrue(t.owner.op is tensor_from_scalar)
self.assertTrue(t.type.broadcastable == (), t.type.broadcastable)
self.assertTrue(t.type.ndim == 0, t.type.ndim)
self.assertTrue(t.type.dtype == s.type.dtype)
v = eval_outputs([t])
self.assertTrue(v == 56., v)
self.assertTrue(isinstance(v, numpy.ndarray))
self.assertTrue(v.shape == (), v.shape)
g = grad(t, s)
self.assertTrue(eval_outputs([g]) == 1.)
class T_scalarfromtensor(unittest.TestCase):
def test0(self):
tt = constant(56) # scal.constant(56)
ss = scalar_from_tensor(tt)
self.assertTrue(ss.owner.op is scalar_from_tensor)
self.assertTrue(ss.type.dtype == tt.type.dtype)
v = eval_outputs([ss])
self.assertTrue(v == 56, v)
if config.cast_policy == 'custom':
self.assertTrue(isinstance(v, numpy.int16))
elif config.cast_policy in ('numpy', 'numpy+floatX'):
self.assertTrue(isinstance(
v, getattr(numpy, str(numpy.asarray(56).dtype))))
else:
raise NotImplementedError(config.cast_policy)
self.assertTrue(v.shape == (), v.shape)
tt = lscalar()
ss = scalar_from_tensor(tt)
g = ss.owner.op.grad([tt], [ss])
fff = function([tt], ss)
v = fff(numpy.asarray(5))
self.assertTrue(v == 5, v)
self.assertTrue(isinstance(v, numpy.int64))
self.assertTrue(v.shape == (), v.shape)
class test_grad(unittest.TestCase):
class O(gof.op.Op):
def __init__(self):
self.gval0 = scalar('e')
self.gval1 = scalar('f')
def make_node(self):
inputs = [scalar('a'), scalar('c')]
outputs = [scalar('b'), scalar('d')]
return gof.Apply(self, inputs, outputs)
def grad(self, inp, grads):
x0, x1 = inp
gz0, gz1 = grads
return self.gval0, self.gval1
def test_1param(self):
"""grad: Test passing a single variable param"""
o = test_grad.O()
a1 = o.make_node()
self.assertTrue(o.gval0 is tensor.grad(a1.outputs[0], a1.inputs[0]))
def test_Nparam(self):
"""grad: Test passing multiple variable params"""
o = test_grad.O()
a1 = o.make_node()
g0, g1 = grad(a1.outputs[0], a1.inputs)
g0.name = None
self.assertTrue(o.gval0 is g0)
self.assertTrue(o.gval1 is g1)
def test_grad_keep_type(self):
"""Tests that the theano grad method returns a list if it is passed a list
and a single variable if it is passed a single variable.
pylearn2 depends on theano behaving this way. This functionality has been
added three times and erroneously removed twice. If you do anything that
requires changing this test or making it fail you are almost certainly
making a common mistake, NOT fixing something. """
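        # e.g. grad(y, [X]) -> [gX]  (a one-element list)
        #      grad(y, X)   ->  gX   (a bare variable)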
X = tensor.matrix()
y = X.sum()
G = tensor.grad(y, [X])
assert isinstance(G, list)
G = tensor.grad(y, X)
assert not isinstance(G, list)
def test_1None_rval(self):
"""grad: Test returning a single zero value from grad"""
o = test_grad.O()
a1 = o.make_node()
g = grad(a1.outputs[0], a1.outputs[1],
disconnected_inputs='ignore')
self.assertTrue(g.owner.op == fill)
self.assertTrue(g.owner.inputs[1].data == 0)
self.assertRaises(TypeError, grad, a1.outputs[0], 'wtf')
def test_NNone_rval(self):
"""grad: Test returning some zero value from grad"""
o = test_grad.O()
a1 = o.make_node()
g0, g1, g2 = grad(a1.outputs[0], a1.inputs + [scalar('z')],
disconnected_inputs='ignore')
self.assertTrue(o.gval0 is g0)
self.assertTrue(o.gval1 is g1)
self.assertTrue(g2.owner.op == fill)
self.assertTrue(g2.owner.inputs[1].data == 0)
def test_zero_gradient_shape(self):
"""Ensure that a zero gradient has the proper shape."""
x = dmatrix()
f = theano.function([x], grad(dscalar(), x,
disconnected_inputs='ignore'))
a = numpy.ones((3, 7))
self.assertTrue((f(a) == 0).all()) # Zero gradient.
self.assertTrue(a.shape == f(a).shape) # With proper shape.
def test_cost_is_scalar(self):
'''grad: Test that a non-scalar cost raises a TypeError'''
s = scalar()
v = vector()
m = matrix()
# grad(v,...) and grad(m,...) should fail
self.assertRaises(TypeError, grad, v, v)
self.assertRaises(TypeError, grad, m, m)
class T_op_cache(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test0(self):
"""trigger bug in ticket #162
"""
lr = constant(0.011)
v = matrix()
v.name = 'v'
gv = fill(v / v, 1.0) / v - (fill(v / v, 1.0) * v) / (v * v)
fn_py = inplace_func([v], gv)
fn_c_or_py = inplace_func([v], gv)
a = rand(5, 2).astype(config.floatX)
self.assertTrue(numpy.all(fn_py(a) == fn_c_or_py(a)))
class T_reshape(utt.InferShapeTester, utt.TestOptimizationMixin):
def __init__(self, name, shared=tensor._shared, op=Reshape, mode=None,
ignore_topo=(DeepCopyOp, opt.MakeVector,
opt.Shape_i, DimShuffle, theano.tensor.Elemwise)):
self.shared = shared
self.op = op
# The tag canonicalize is needed for the shape test in FAST_COMPILE
self.mode = mode
self.ignore_topo = ignore_topo
super(T_reshape, self).__init__(name)
def function(self, inputs, outputs, ignore_empty=False):
f = function(inputs, outputs, mode=self.mode)
if self.mode is not None or theano.config.mode != "FAST_COMPILE":
topo = f.maker.fgraph.toposort()
topo_ = [node for node in topo if not isinstance(node.op,
self.ignore_topo)]
if ignore_empty:
assert len(topo_) <= 1, topo_
else:
assert len(topo_) == 1, topo_
if len(topo_) > 0:
assert type(topo_[0].op) is self.op
return f
def test_reshape(self):
a = dvector()
b = dmatrix()
d = dmatrix()
# basic to 1 dim(without list)
c = reshape(b, as_tensor_variable(6), ndim=1)
f = self.function([b], c)
b_val1 = numpy.asarray([[0, 1, 2], [3, 4, 5]])
c_val1 = numpy.asarray([0, 1, 2, 3, 4, 5])
b_val2 = b_val1.T
c_val2 = numpy.asarray([0, 3, 1, 4, 2, 5])
f_out1 = f(b_val1)
f_out2 = f(b_val2)
assert numpy.all(f_out1 == c_val1), (f_out1, c_val1)
assert numpy.all(f_out2 == c_val2), (f_out2, c_val2)
# print f.maker.fgraph.toposort()
# check that we remove the useless reshape
# basic to 1 dim(with list)
c = reshape(b, (as_tensor_variable(6),), ndim=1)
f = self.function([b], c)
assert numpy.all(f(numpy.asarray([[0, 1, 2], [3, 4, 5]])) ==
numpy.asarray([0, 1, 2, 3, 4, 5]))
# print f.maker.fgraph.toposort()
# check that we remove the useless reshape
# basic to shape object of same ndim
c = reshape(b, d.shape)
f = self.function([b, d], c)
assert numpy.all(f(numpy.asarray([[0, 1, 2], [3, 4, 5]]),
[[0, 1], [2, 3], [4, 5]]) ==
numpy.asarray([[0, 1], [2, 3], [4, 5]]))
# basic to 2 dims
c = reshape(a, [2, 3])
f = self.function([a], c)
assert numpy.all(f(numpy.asarray([0, 1, 2, 3, 4, 5])) ==
numpy.asarray([[0, 1, 2], [3, 4, 5]]))
# test that it works without inplace operations
a_val = numpy.asarray([0, 1, 2, 3, 4, 5])
a_val_copy = numpy.asarray([0, 1, 2, 3, 4, 5])
b_val = numpy.asarray([[0, 1, 2], [3, 4, 5]])
f_sub = self.function([a, b], c - b)
assert numpy.all(f_sub(a_val, b_val) == 0.0)
assert numpy.all(a_val == a_val_copy)
# test that it works with inplace operations
a_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float64')
a_val_copy = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float64')
b_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
f_sub = self.function([a, b], c - b)
assert numpy.all(f_sub(a_val, b_val) == 0.0)
assert numpy.all(a_val == a_val_copy)
# verify gradient
def just_vals(v):
return Reshape(2)(v, theano._asarray([2, 3], dtype='int32'))
utt.verify_grad(just_vals, [a_val], mode=self.mode)
# test infer_shape
self._compile_and_check([a], [c], (a_val,), self.op)
# test broadcast flag for constant value of 1
c = reshape(b, (b.shape[0], b.shape[1], 1))
        # That reshape may get replaced with a dimshuffle, which is ignored,
# so we pass "ignore_empty=True"
f = self.function([b], c, ignore_empty=True)
assert numpy.all(f(numpy.asarray([[0, 1, 2], [3, 4, 5]])) ==
numpy.asarray([[[0], [1], [2]], [[3], [4], [5]]]))
assert (f.maker.fgraph.toposort()[-1].outputs[0].type.broadcastable ==
(False, False, True))
# test broadcast flag for constant value of 1 if it cannot be
# replaced with dimshuffle
c = reshape(b, (b.shape[1], b.shape[0], 1))
f = self.function([b], c, ignore_empty=True)
assert numpy.all(f(numpy.asarray([[0, 1, 2], [3, 4, 5]])) ==
numpy.asarray([[[0], [1]], [[2], [3]], [[4], [5]]]))
assert (f.maker.fgraph.toposort()[-1].outputs[0].type.broadcastable ==
(False, False, True))
def test_m1(self):
t = tensor3()
rng = numpy.random.RandomState(seed=utt.fetch_seed())
val = rng.uniform(size=(3, 4, 5)).astype(config.floatX)
for out in [t.reshape([-1]), t.reshape([-1, 5]),
t.reshape([5, -1]), t.reshape([5, -1, 3])]:
self._compile_and_check([t], [out], [val], self.op)
def test_reshape_long_in_shape(self):
v = dvector('v')
r = v.reshape((v.shape[0], L(1)))
print(r.eval({v: numpy.arange(5.)}))
assert numpy.allclose(r.eval({v: numpy.arange(5.)}).T,
numpy.arange(5.))
def test_bad_shape(self):
a = matrix('a')
shapes = ivector('shapes')
rng = numpy.random.RandomState(seed=utt.fetch_seed())
a_val = rng.uniform(size=(3, 4)).astype(config.floatX)
# Test reshape to 1 dim
r = a.reshape(shapes, ndim=1)
z = zeros_like(r)
f = self.function([a, shapes], r)
self.assertRaises(ValueError, f, a_val, [13])
# Test reshape to 2 dim
r = a.reshape(shapes, ndim=2)
z = zeros_like(r)
f = self.function([a, shapes], r)
self.assertRaises(ValueError, f, a_val, [-1, 5])
self.assertRaises(ValueError, f, a_val, [7, -1])
self.assertRaises(ValueError, f, a_val, [7, 5])
self.assertRaises(ValueError, f, a_val, [-1, -1])
def test_0(self):
x = fvector('x')
f = self.function([x], x.reshape((0, 100)))
assert f(numpy.ndarray((0,), dtype='float32')).shape == (0, 100)
def test_empty_shp(self):
const = theano.tensor.constant([1]).reshape(())
f = function([], const)
assert f().shape == ()
def test_make_column_matrix_broadcastable():
# The goal of the operation made by `b` is to ensure the second dimension
# of the column matrix is broadcastable.
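    # Sketch of the intent (mirrors the call below): a (3, 1) input is
    # reshaped to shape (3,), then dimshuffle(0, 'x') produces a (3, 1)
    # column whose second dimension is broadcastable, so adding a length-2
    # vector broadcasts the result to shape (3, 2).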
a = tensor.dmatrix()
b = a.reshape((a.shape[0], )).dimshuffle(0, 'x')
f = function([a], b)
assert (f(numpy.zeros((3, 1))) + numpy.ones(2) == numpy.ones((3, 2))).all()
def test_flatten_outdimNone():
a = dmatrix()
c = flatten(a)
f = inplace_func([a], c)
a_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
c_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float64')
assert numpy.all(f(a_val) == c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val) == c_val)
utt.verify_grad(flatten, [a_val])
def test_flatten_scalar():
a = dscalar()
c = flatten(a)
f = inplace_func([a], c)
a_val = theano._asarray(3.0, dtype='float64')
c_val = theano._asarray([3.0], dtype='float64')
assert numpy.all(f(a_val) == c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val) == c_val)
    # utt.verify_grad(flatten, [a_val])  # TODO: fix verify_grad to work on scalars
def test_flatten_outdim1():
a = dmatrix()
c = flatten(a, 1)
f = inplace_func([a], c)
a_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
c_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float64')
assert numpy.all(f(a_val) == c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val) == c_val)
utt.verify_grad(flatten, [a_val])
def test_flatten_outdim2():
a = dmatrix()
c = flatten(a, 2)
f = inplace_func([a], c)
a_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
assert numpy.all(f(a_val) == a_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val) == a_val)
flatten_2 = partial(flatten, outdim=2)
utt.verify_grad(flatten_2, [a_val])
def test_flatten_outdim2_of_3():
a = TensorType('float64', (False, False, False))()
c = flatten(a, 2)
f = inplace_func([a], c)
a_val = theano._asarray([[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
dtype='float64')
c_val = theano._asarray([[0, 1, 2, 3], [4, 5, 6, 7]], dtype='float64')
assert numpy.all(f(a_val) == c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val) == c_val)
flatten_2 = partial(flatten, outdim=2)
utt.verify_grad(flatten_2, [a_val])
def test_flatten_broadcastable():
# Ensure that the broadcastable pattern of the output is coherent with
# that of the input
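    # For instance (see the assertions below), with outdim=2 the collapsed
    # trailing axis is broadcastable only when every collapsed input
    # dimension is broadcastable, as in the (False, True, True, True) case.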
inp = TensorType('float64', (False, False, False, False))()
out = flatten(inp, outdim=2)
assert out.broadcastable == (False, False)
inp = TensorType('float64', (False, False, False, True))()
out = flatten(inp, outdim=2)
assert out.broadcastable == (False, False)
inp = TensorType('float64', (False, True, False, True))()
out = flatten(inp, outdim=2)
assert out.broadcastable == (False, False)
inp = TensorType('float64', (False, True, True, True))()
out = flatten(inp, outdim=2)
assert out.broadcastable == (False, True)
inp = TensorType('float64', (True, False, True, True))()
out = flatten(inp, outdim=3)
assert out.broadcastable == (True, False, True)
def test_flatten_outdim_invalid():
a = dmatrix()
try:
c = flatten(a, 3)
assert False
except ValueError:
pass
try:
c = flatten(a, 0)
assert False
except ValueError:
pass
def test_is_flat():
"""
    Tests the is_flat method for constant and symbolic variables,
    as well as reshaped constant and symbolic variables with the
    given outdim.
"""
# Constant variable
assert tensor.is_flat(tensor.as_tensor_variable(numpy.zeros((10))))
assert tensor.is_flat(tensor.as_tensor_variable(numpy.zeros((10, 10, 10))),
outdim=3)
assert not tensor.is_flat(
tensor.as_tensor_variable(numpy.zeros((10, 10, 10))))
# Symbolic variable
assert tensor.is_flat(tensor.vector())
assert tensor.is_flat(tensor.tensor3(), outdim=3)
assert not tensor.is_flat(tensor.tensor3())
# Reshape with constant shape
X = tensor.tensor4()
assert tensor.is_flat(X.reshape((-1, )))
assert tensor.is_flat(X.reshape((10, 10, -1)), outdim=3)
assert not tensor.is_flat(X.reshape((10, 10, -1)))
# Reshape with symbolic shape
X = tensor.tensor4()
assert tensor.is_flat(X.reshape((tensor.iscalar(), )))
assert tensor.is_flat(X.reshape((tensor.iscalar(), ) * 3), outdim=3)
assert not tensor.is_flat(X.reshape((tensor.iscalar(), ) * 3))
def test_tile():
def run_tile(x, x_, reps, use_symbolic_reps):
if use_symbolic_reps:
rep_symbols = [iscalar() for _ in range(len(reps))]
f = function([x] + rep_symbols, tile(x, rep_symbols))
return f(*([x_] + list(reps)))
else:
f = function([x], tile(x, reps))
return f(x_)
rng = numpy.random.RandomState(utt.fetch_seed())
for use_symbolic_reps in [False, True]:
# Test the one-dimensional case.
x = vector()
x_ = rng.randn(5).astype(config.floatX)
assert numpy.all(run_tile(x, x_, (2,), use_symbolic_reps) ==
numpy.tile(x_, (2,)))
# Test the two-dimensional case.
x = matrix()
x_ = rng.randn(2, 4).astype(config.floatX)
assert numpy.all(run_tile(x, x_, (2, 3), use_symbolic_reps) ==
numpy.tile(x_, (2, 3)))
# Test the three-dimensional case.
x = tensor3()
x_ = rng.randn(2, 4, 3).astype(config.floatX)
assert numpy.all(run_tile(x, x_, (2, 3, 4), use_symbolic_reps) ==
numpy.tile(x_, (2, 3, 4)))
# Test the four-dimensional case.
x = tensor4()
x_ = rng.randn(2, 4, 3, 5).astype(config.floatX)
assert numpy.all(run_tile(x, x_, (2, 3, 4, 6), use_symbolic_reps) ==
numpy.tile(x_, (2, 3, 4, 6)))
# Test when reps is integer, tensor.scalar or tensor.vector.
# Test 1,2,3,4-dimensional cases.
# Test input x has the shape [2], [2, 4], [2, 4, 3], [2, 4, 3, 5].
test_shape = [2, 4, 3, 5]
k = 0
for xtype in [vector(), matrix(), tensor3(), tensor4()]:
x = xtype
k = k+1
x_ = rng.randn(*test_shape[0:k]).astype(config.floatX)
# integer:
reps_ = 2
f = function([x], tile(x, reps_))
assert numpy.all( f(x_) == numpy.tile(x_, reps_))
# tensor.scalar:
reps = iscalar()
reps_ = 2
f = function([x, reps], tile(x, reps))
assert numpy.all( f(x_, reps_) == numpy.tile(x_, reps_))
# tensor.vector:
reps = ivector()
reps_ = [2] if k == 1 or k == 2 else [2, 3]
ndim_ = k
f = function([x, reps], tile(x, reps, ndim_))
assert numpy.all( f(x_, reps_) == numpy.tile(x_, reps_))
# list of integers:
reps_ = [2, 3, 4]
f = function([x], tile(x, reps_))
assert numpy.all( f(x_) == numpy.tile(x_, reps_))
# list of integers and tensor.scalars:
d = iscalar()
reps = [2, d, 4]
f = function([x, d], tile(x, reps))
reps_ = [2, 3, 4]
assert numpy.all( f(x_, 3) == numpy.tile(x_, reps_))
# reps is list, len(reps) > x.ndim, 3 cases below:
r = [2, 3, 4, 5, 6]
reps_ = r[:k+1] # len(reps_) = x.ndim+1
# (1) ndim = None.
f = function([x], tile(x, reps_))
assert numpy.all( f(x_) == numpy.tile(x_, reps_))
# (2) ndim = len(reps).
ndim_ = len(reps_)
f = function([x], tile(x, reps_, ndim_))
assert numpy.all( f(x_) == numpy.tile(x_, reps_))
# (3) ndim > len(reps)
ndim_ = len(reps_) + 1
f = function([x], tile(x, reps_, ndim_))
assert numpy.all( f(x_) == numpy.tile(x_, [1] + reps_))
# reps is list, ndim > x.ndim > len(reps):
r = [2, 3, 4, 5]
if k > 1:
ndim_ = k+1
reps_ = r[:k-1]
f = function([x], tile(x, reps_, ndim_))
assert numpy.all( f(x_) == numpy.tile(x_, [1, 1] + reps_))
# error raising test: ndim not specified when reps is vector
reps = ivector()
numpy.testing.assert_raises(ValueError, tile, x, reps)
        # error raising test: reps is not an integer
for reps in [2.5, fscalar(), fvector()]:
numpy.testing.assert_raises(ValueError, tile, x, reps)
# error raising test: the dimension of reps exceeds 1
reps = imatrix()
numpy.testing.assert_raises(ValueError, tile, x, reps)
# error raising test: ndim is not None, ndim < x.ndim
# 3 cases below (reps is list/tensor.scalar/tensor.vector):
        for reps in [[2, 3, 4], iscalar(), ivector()]:
if k > 1:
ndim = k-1
numpy.testing.assert_raises(ValueError, tile, x, reps, ndim)
# error raising test: reps is list, len(reps) > ndim
r = [2, 3, 4, 5, 6]
reps = r[:k+1]
ndim = k
numpy.testing.assert_raises(ValueError, tile, x, reps, ndim)
# error raising test:
# reps is tensor.vector and len(reps_value) > ndim,
        # reps_value is the real value when executing the function.
reps = ivector()
r = [2, 3, 4, 5, 6, 7]
reps_ = r[:k+2]
ndim_ = k+1
f = function([x, reps], tile(x, reps, ndim_))
numpy.testing.assert_raises(AssertionError, f, x_, reps_)
def test_tile_grad():
def grad_tile(x, reps, np_x):
y = tile(x, reps)
z = y.sum()
g = theano.function([x], grad(z, x))
grad_res = g(np_x)
# The gradient should be the product of the tiling dimensions
# (since the gradients are additive through the tiling operation)
assert numpy.all(grad_res == numpy.prod(reps))
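    # Worked example: tiling a length-5 vector with reps=[3] repeats every
    # input element 3 times in the output, so d(sum(y))/dx is 3 for each
    # entry, i.e. numpy.prod([3]).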
rng = numpy.random.RandomState(utt.fetch_seed())
# test vector
grad_tile(vector('x'), [3], rng.randn(5).astype(config.floatX))
# test matrix
grad_tile(matrix('x'), [3, 4], rng.randn(2, 3).astype(config.floatX))
# test tensor3
grad_tile(tensor3('x'), [3, 4, 5],
rng.randn(2, 4, 3).astype(config.floatX))
# test tensor4
grad_tile(tensor4('x'), [3, 4, 5, 6],
rng.randn(2, 4, 3, 5).astype(config.floatX))
class TestARange(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test_Op_integers(self):
"""Test behaviour of ARange Op on integer inputs"""
start, stop, step = iscalars('start', 'stop', 'step')
out = ARange(start.type.dtype)(start, stop, step)
f = function([start, stop, step], out)
assert numpy.all(f(0, 5, 1) == numpy.arange(0, 5, 1))
assert numpy.all(f(2, 11, 4) == numpy.arange(2, 11, 4))
assert numpy.all(f(-5, 1, 1) == numpy.arange(-5, 1, 1))
assert numpy.all(f(10, 2, -2) == numpy.arange(10, 2, -2))
assert numpy.all(f(10, 2, 2) == numpy.arange(10, 2, 2))
assert numpy.all(f(0, 0, 1) == numpy.arange(0, 0, 1))
def test_integers(self):
"""Test arange constructor, on integer outputs"""
start, stop, step = iscalars('start', 'stop', 'step')
out = arange(start, stop, step)
f = function([start, stop, step], out)
if config.cast_policy == 'custom':
assert out.dtype == 'int64'
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(numpy.array(1, dtype='int32')).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(0, 5, 1) == numpy.arange(0, 5, 1))
assert numpy.all(f(2, 11, 4) == numpy.arange(2, 11, 4))
assert numpy.all(f(-5, 1, 1) == numpy.arange(-5, 1, 1))
assert numpy.all(f(10, 2, -2) == numpy.arange(10, 2, -2))
assert numpy.all(f(10, 2, 2) == numpy.arange(10, 2, 2))
assert numpy.all(f(0, 0, 1) == numpy.arange(0, 0, 1))
def test_float32(self):
"""Test arange constructor, on float32 outputs"""
start, stop, step = fscalars('start', 'stop', 'step')
out = arange(start, stop, step)
f = function([start, stop, step], out)
if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype
elif config.cast_policy == 'numpy':
numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),
numpy.array(1, dtype=stop.dtype),
numpy.array(1, dtype=step.dtype)).dtype
assert out.dtype == numpy_dtype
elif config.cast_policy == 'numpy+floatX':
assert out.dtype == config.floatX
else:
raise NotImplementedError(config.cast_policy)
        arg_vals = [(0, 5, 1), (2, 11, 4), (-5, 1.1, 1.2),
                    (1.3, 2, -2.1), (10, 2, 2)]
for arg_v in arg_vals:
start_v, stop_v, step_v = arg_v
start_v_, stop_v_, step_v_ = numpy.asarray(arg_v,
dtype=start.type.dtype)
f_val = f(start_v_, stop_v_, step_v_)
if config.cast_policy == 'custom':
expected_val = numpy.arange(start_v, stop_v, step_v,
dtype=start.type.dtype)
elif config.cast_policy in ('numpy', 'numpy+floatX'):
expected_val = numpy.arange(start_v_, stop_v_, step_v_,
dtype=out.dtype)
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f_val == expected_val)
def test_float64(self):
"""Test arange constructor, on float64 outputs"""
start, stop, step = dscalars('start', 'stop', 'step')
out = arange(start, stop, step)
f = function([start, stop, step], out)
assert out.dtype == start.type.dtype
        arg_vals = [(0, 5, 1), (2, 11, 4), (-5, 1.1, 1.2),
                    (1.3, 2, -2.1), (10, 2, 2)]
for arg_v in arg_vals:
start_v, stop_v, step_v = arg_v
start_v_, stop_v_, step_v_ = numpy.asarray(arg_v,
dtype=start.type.dtype)
f_val = f(start_v_, stop_v_, step_v_)
if config.cast_policy == 'custom':
expected_val = numpy.arange(start_v, stop_v, step_v,
dtype=start.type.dtype)
elif config.cast_policy in ('numpy', 'numpy+floatX'):
expected_val = numpy.arange(start_v_, stop_v_, step_v_)
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f_val == expected_val)
def test_default_step(self):
"""Test that arange constructor uses the correct default step"""
start, stop = iscalars('start', 'stop')
out = arange(start, stop)
f = function([start, stop], out)
if config.cast_policy == 'custom':
assert out.dtype == 'int64'
elif config.cast_policy in ('numpy', 'numpy+floatX'):
assert out.dtype == numpy.arange(numpy.int32(0),
numpy.int32(1)).dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(0, 5) == numpy.arange(0, 5))
assert numpy.all(f(-5, 1) == numpy.arange(-5, 1))
assert numpy.all(f(0, 0) == numpy.arange(0, 0))
dstart, dstop = dscalars('start', 'stop')
dout = arange(dstart, dstop)
df = function([dstart, dstop], dout)
assert dout.dtype == dstart.type.dtype
# print df(0.2, 5.3)
# print numpy.arange(0.2, 5.3)
assert numpy.all(df(0.2, 5.3) == numpy.arange(0.2, 5.3))
assert numpy.all(df(0.8, 5.3) == numpy.arange(0.8, 5.3))
assert numpy.all(df(-0.7, 5.3) == numpy.arange(-0.7, 5.3))
def test_default_start(self):
"""Test that arange constructor uses the correct default start"""
stop = iscalar('stop')
out = arange(stop)
f = function([stop], out)
if config.cast_policy == 'custom':
assert out.dtype == 'int64'
elif config.cast_policy in ('numpy', 'numpy+floatX'):
assert out.dtype == numpy.arange(numpy.int32(1)).dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(8) == numpy.arange(8))
assert numpy.all(f(-2) == numpy.arange(-2))
fstop = fscalar('stop')
fout = arange(fstop)
ff = function([fstop], fout)
if config.cast_policy == 'custom':
assert fout.dtype == fstop.type.dtype
elif config.cast_policy == 'numpy':
assert fout.dtype == numpy.arange(numpy.float32(1)).dtype
elif config.cast_policy == 'numpy+floatX':
if config.floatX == 'float32':
assert fout.dtype == 'float32'
else:
assert fout.dtype == numpy.arange(numpy.float32(1)).dtype
else:
raise NotImplementedError(config.cast_policy)
fstop_values = [0.2, -0.7, 8.5]
for fstop_v in fstop_values:
fstop_v32 = numpy.float32(fstop_v)
assert numpy.all(ff(fstop_v32) == numpy.arange(fstop_v))
def test_upcast(self):
"""Test that arange computes output type adequately"""
if config.cast_policy == 'custom':
assert arange(iscalar()).dtype == 'int64'
assert arange(fscalar()).dtype == fscalar().dtype
assert arange(dscalar()).dtype == dscalar().dtype
# int32 + float32 -> float64
assert arange(iscalar(), fscalar()).dtype == dscalar().dtype
assert arange(iscalar(), dscalar()).dtype == dscalar().dtype
assert arange(fscalar(), dscalar()).dtype == dscalar().dtype
assert arange(iscalar(), fscalar(), dscalar()).dtype == \
dscalar().dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'):
for dtype in get_numeric_types():
# Test with a single argument.
arange_dtype = arange(scalar(dtype=str(dtype))).dtype
numpy_dtype = numpy.arange(numpy.array(1, dtype=dtype)).dtype
if (dtype != 'float64' and
numpy_dtype == 'float64' and
config.cast_policy == 'numpy+floatX' and
config.floatX == 'float32'):
# We want a float32 arange.
assert arange_dtype == 'float32'
else:
# Follow numpy.
assert arange_dtype == numpy_dtype
# Test with two arguments.
for stop_dtype in get_numeric_types():
arange_dtype = arange(
start=scalar(dtype=str(dtype)),
stop=scalar(dtype=str(stop_dtype))).dtype
numpy_dtype = numpy.arange(
start=numpy.array(0, dtype=dtype),
stop=numpy.array(1, dtype=stop_dtype)).dtype
if (dtype != 'float64' and
stop_dtype != 'float64' and
numpy_dtype == 'float64' and
config.cast_policy == 'numpy+floatX' and
config.floatX == 'float32'):
# We want a float32 arange.
assert arange_dtype == 'float32'
else:
# Follow numpy.
assert arange_dtype == numpy_dtype
# Test with three arguments.
for step_dtype in get_numeric_types():
arange_dtype = arange(
start=scalar(dtype=str(dtype)),
stop=scalar(dtype=str(stop_dtype)),
step=scalar(dtype=str(step_dtype))).dtype
numpy_dtype = numpy.arange(
start=numpy.array(0, dtype=dtype),
stop=numpy.array(1, dtype=stop_dtype),
step=numpy.array(1, dtype=step_dtype)).dtype
if (dtype != 'float64' and
stop_dtype != 'float64' and
step_dtype != 'float64' and
numpy_dtype == 'float64' and
config.cast_policy == 'numpy+floatX' and
config.floatX == 'float32'):
# We want a float32 arange.
assert arange_dtype == 'float32'
else:
# Follow numpy.
assert arange_dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
def test_dtype_cache(self):
"""Checks that the same Op is returned on repeated calls to arange
using the same dtype, but not for different dtypes."""
start, stop, step = iscalars('start', 'stop', 'step')
out1 = arange(start, stop, step)
out2 = arange(start, stop, step, dtype=out1.dtype)
out3 = arange(start, stop, 2., dtype=out1.dtype)
out4 = arange(start, stop, 2.)
assert out1.owner.op is out2.owner.op
assert out2.owner.op is out3.owner.op
assert out3.owner.op is not out4.owner.op
def test_infer_shape(self):
start, stop, step = iscalars('start', 'stop', 'step')
out = arange(start, stop, step)
mode = theano.config.mode
if mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
mode = compile.mode.get_mode(mode).excluding('fusion')
f = function([start, stop, step], out.shape, mode=mode)
assert len(f.maker.fgraph.toposort()) == 9
if config.cast_policy == 'custom':
assert out.dtype == 'int64'
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),
numpy.array(1, dtype=stop.dtype),
numpy.array(1, dtype=step.dtype)).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(0, 5, 1) == len(numpy.arange(0, 5, 1)))
assert numpy.all(f(2, 11, 4) == len(numpy.arange(2, 11, 4)))
assert numpy.all(f(-5, 1, 1) == len(numpy.arange(-5, 1, 1)))
assert numpy.all(f(10, 2, -2) == len(numpy.arange(10, 2, -2)))
assert numpy.all(f(10, 2, 2) == len(numpy.arange(10, 2, 2)))
assert numpy.all(f(0, 0, 1) == len(numpy.arange(0, 0, 1)))
out = arange(start, stop, 1)
f = function([start, stop], out.shape, mode=mode)
assert len(f.maker.fgraph.toposort()) == 5
# 4 [Elemwise{sub,no_inplace}(stop, start), Elemwise{Cast{int64}}(Elemwise{sub,no_inplace}.0), Elemwise{Maximum{output_types_preference=transfer_type{0}}}[(0, 0)](Elemwise{Cast{int64}}.0, 0), MakeVector(Elemwise{Maximum{output_types_preference=transfer_type{0}}}[(0, 0)].0)]
if config.cast_policy == 'custom':
assert out.dtype == 'int64'
elif config.cast_policy in ('numpy', 'numpy+floatX'):
assert out.dtype == numpy.arange(
numpy.int32(0), numpy.int32(1), numpy.int32(1)).dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(0, 5) == len(numpy.arange(0, 5)))
assert numpy.all(f(2, 11) == len(numpy.arange(2, 11)))
assert numpy.all(f(-5, 1) == len(numpy.arange(-5, 1)))
assert numpy.all(f(10, 2) == len(numpy.arange(10, 2)))
assert numpy.all(f(10, 2) == len(numpy.arange(10, 2)))
assert numpy.all(f(0, 0) == len(numpy.arange(0, 0)))
assert numpy.all(f(-64, 64) == len(numpy.arange(-64, 64)))
assert arange(-64, 64).shape.eval() == [128]
assert arange(-64, 64, 2).shape.eval() == [64]
out = arange(0, stop, 1)
f = function([stop], out.shape, mode=mode)
assert len(f.maker.fgraph.toposort()) == 2
#[Elemwise{Cast{int64}}(stop), MakeVector(Elemwise{Cast{int64}}.0)]
if config.cast_policy == 'custom':
assert out.dtype == 'int64'
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(0,
numpy.array(1, dtype=stop.dtype),
1).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(5) == len(numpy.arange(0, 5)))
assert numpy.all(f(11) == len(numpy.arange(0, 11)))
assert numpy.all(f(1) == len(numpy.arange(0, 1)))
assert numpy.all(f(2) == len(numpy.arange(0, 2)))
assert numpy.all(f(2) == len(numpy.arange(0, 2)))
assert numpy.all(f(0) == len(numpy.arange(0, 0)))
class TestNdGrid(unittest.TestCase):
def setUp(self):
pass
def test_mgrid_numpy_equiv(self):
nmgrid = (numpy.mgrid[0:1:.1, 1:10:1., 10:100:10.],
numpy.mgrid[0:2:1, 1:10:1, 10:100:10])
tmgrid = (mgrid[0:1:.1, 1:10:1., 10:100:10.],
mgrid[0:2:1, 1:10:1, 10:100:10])
for n, t in zip(nmgrid, tmgrid):
for ng, tg in zip(n, t):
utt.assert_allclose(ng, tg.eval())
def test_ogrid_numpy_equiv(self):
nogrid = (numpy.ogrid[0:1:.1, 1:10:1., 10:100:10.],
numpy.ogrid[0:2:1, 1:10:1, 10:100:10])
togrid = (ogrid[0:1:.1, 1:10:1., 10:100:10.],
ogrid[0:2:1, 1:10:1, 10:100:10])
for n, t in zip(nogrid, togrid):
for ng, tg in zip(n, t):
utt.assert_allclose(ng, tg.eval())
def test_mgrid_theano_variable_numpy_equiv(self):
nfmgrid = numpy.mgrid[0:1:.1, 1:10:1., 10:100:10.]
nimgrid = numpy.mgrid[0:2:1, 1:10:1, 10:100:10]
i,j,k = dscalars('i','j','k')
l,m,n = iscalars('l','m','n')
tfmgrid = mgrid[i:1:.1, 1:j:1., 10:100:k]
timgrid = mgrid[l:2:1, 1:m:1, 10:100:n]
ff = theano.function([i, j, k], tfmgrid)
fi = theano.function([l, m, n], timgrid)
for n, t in zip((nfmgrid,nimgrid), (ff(0, 10, 10.),fi(0, 10, 10))):
for ng, tg in zip(n, t):
utt.assert_allclose(ng, tg)
def test_ogrid_theano_variable_numpy_equiv(self):
nfogrid = numpy.ogrid[0:1:.1, 1:10:1., 10:100:10.]
niogrid = numpy.ogrid[0:2:1, 1:10:1, 10:100:10]
i,j,k = dscalars('i','j','k')
l,m,n = iscalars('l','m','n')
tfogrid = ogrid[i:1:.1, 1:j:1., 10:100:k]
tiogrid = ogrid[l:2:1, 1:m:1, 10:100:n]
ff = theano.function([i, j, k], tfogrid)
fi = theano.function([l, m, n], tiogrid)
for n, t in zip((nfogrid,niogrid), (ff(0, 10, 10.),fi(0, 10, 10))):
for ng, tg in zip(n, t):
utt.assert_allclose(ng, tg)
class TestInversePermutation(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test_dim1(self):
"""Test the inversion of one permutation (int vector)"""
p = ivector()
inv = inverse_permutation(p)
assert inv.dtype == p.dtype
f_inverse = function([p], inv)
# Generate a random permutation
rng = numpy.random.RandomState(utt.fetch_seed())
p_val = rng.permutation(10).astype('int32')
inv_val = f_inverse(p_val)
# Check that the inverse of the inverse is the original permutation
assert numpy.all(f_inverse(inv_val) == p_val)
# Check that permutation(inverse) == inverse(permutation) = identity
assert numpy.all(p_val[inv_val] == numpy.arange(10))
assert numpy.all(inv_val[p_val] == numpy.arange(10))
def test_dim2(self):
"""Test the inversion of several permutations at a time"""
# Each row of p is a different permutation to inverse
p = imatrix()
inv = inverse_permutation(p)
f_inverse = function([p], inv)
rng = numpy.random.RandomState(utt.fetch_seed())
# Generate 10 random permutations
p_val = numpy.asarray([rng.permutation(10) for i in range(7)],
dtype='int32')
inv_val = f_inverse(p_val)
# Check that the inverse of the inverse is the original permutation list
assert numpy.all(f_inverse(inv_val) == p_val)
# Check that, for each permutation,
# permutation(inverse) == inverse(permutation) = identity
for p_row, i_row in zip(p_val, inv_val):
assert numpy.all(p_row[i_row] == numpy.arange(10))
assert numpy.all(i_row[p_row] == numpy.arange(10))
class TestPermuteRowElements(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test_1_1(self):
"""Test PermuteRowElements(vector, vector)"""
input = dvector()
p = ivector()
out = permute_row_elements(input, p)
permute = function([input, p], out)
rng = numpy.random.RandomState(utt.fetch_seed())
input_val = rng.uniform(size=(5,))
p_val = rng.permutation(5).astype('int32')
out_val = permute(input_val, p_val)
# Should be equivalent to advanced indexing
out_bis = input_val[p_val]
assert numpy.all(out_val == out_bis)
# Verify gradient
def permute_fixed(s_input):
"""Auxiliary op defined to get rid of gradient wrt p_val"""
return permute_row_elements(s_input, p_val)
utt.verify_grad(permute_fixed, [input_val])
def test_2_1(self):
"""Test broadcasting in PermuteRowElements(matrix, vector)"""
input = matrix()
p = ivector()
out = permute_row_elements(input, p)
permute = function([input, p], out)
rng = numpy.random.RandomState(utt.fetch_seed())
input_val = rng.uniform(size=(3, 5)).astype(config.floatX)
p_val = rng.permutation(5).astype('int32')
out_val = permute(input_val, p_val)
# The same permutation should be applied to every row of the input matrix.
out_bis = numpy.asarray([r[p_val] for r in input_val])
assert numpy.all(out_val == out_bis)
# Verify gradient
def permute_fixed(s_input):
"""Auxiliary op defined to get rid of gradient wrt p_val"""
return permute_row_elements(s_input, p_val)
utt.verify_grad(permute_fixed, [input_val])
def test_2_2(self):
"""Test PermuteRowElements(matrix, matrix)"""
input = matrix()
p = imatrix()
out = permute_row_elements(input, p)
permute = function([input, p], out)
rng = numpy.random.RandomState(utt.fetch_seed())
input_val = rng.uniform(size=(3, 5)).astype(config.floatX)
p_val = numpy.asarray([rng.permutation(5) for i in range(3)],
dtype='int32')
out_val = permute(input_val, p_val)
# Each row of p contains a permutation to apply to the corresponding
# row of input
out_bis = numpy.asarray([i_row[p_row] for i_row,
p_row in zip(input_val, p_val)])
assert numpy.all(out_val == out_bis)
# Verify gradient
def permute_fixed(s_input):
"""Auxiliary op defined to get rid of gradient wrt p_val"""
return permute_row_elements(s_input, p_val)
utt.verify_grad(permute_fixed, [input_val])
def test_1_2(self):
"""Test PermuteRowElements(vector, matrix)
Different permutations will be applied to the same input vector"""
input = vector()
p = imatrix()
out = permute_row_elements(input, p)
permute = function([input, p], out)
rng = numpy.random.RandomState(utt.fetch_seed())
input_val = rng.uniform(size=(5,)).astype(config.floatX)
p_val = numpy.asarray([rng.permutation(5) for i in range(3)
], dtype='int32')
out_val = permute(input_val, p_val)
# Each row of p contains a permutation to apply to the input vector
out_bis = numpy.asarray([input_val[p_row] for p_row in p_val])
assert numpy.all(out_val == out_bis)
# Verify gradient
def permute_fixed(s_input):
"""Auxiliary op defined to get rid of gradient wrt p_val"""
return permute_row_elements(s_input, p_val)
utt.verify_grad(permute_fixed, [input_val])
def test_3b_2(self):
"""Test permute_row_elements on a more complex broadcasting pattern:
input.type.broadcastable = (False, True, False),
p.type.broadcastable = (False, False)."""
input = TensorType('floatX', (False, True, False))()
p = imatrix()
out = permute_row_elements(input, p)
permute = function([input, p], out)
rng = numpy.random.RandomState(utt.fetch_seed())
input_val = rng.uniform(size=(4, 1, 5)).astype(config.floatX)
p_val = numpy.asarray([rng.permutation(5) for i in range(3)],
dtype='int32')
out_val = permute(input_val, p_val)
# Each row of p contains a permutation to apply to each row
# of the input tensor
out_bis = numpy.asarray([[in_mat[0, p_row]
for p_row in p_val] for in_mat in input_val])
assert numpy.all(out_val == out_bis)
# Verify gradient
def permute_fixed(s_input):
"""Auxiliary op defined to get rid of gradient wrt p_val"""
return permute_row_elements(s_input, p_val)
utt.verify_grad(permute_fixed, [input_val])
class test_tensordot(unittest.TestCase):
def TensorDot(self, axes):
"""
Since tensordot is no longer an op, mimic the old op signature
to allow easy use of verify_grad.
"""
return lambda a, b: tensordot(a, b, axes)
def setUp(self):
utt.seed_rng()
def test0(self):
# Test vector-vector
avec = vector()
bvec = vector()
axes = ((0, ), (0, ))
c = tensordot(avec, bvec, axes)
f1 = inplace_func([avec, bvec], c)
aval = rand(5)
bval = rand(5)
out0 = numpy.tensordot(aval, bval, axes)
out1 = f1(aval, bval)
utt.assert_allclose(out0, out1)
utt.verify_grad(self.TensorDot(axes), [aval, bval])
# Test matrix-vector
bmat = matrix()
axes = ((0, ), (1, ))
c = tensordot(avec, bmat, axes)
f2 = inplace_func([avec, bmat], c)
aval = rand(5)
bval = rand(8, 5)
utt.assert_allclose(numpy.tensordot(aval, bval, axes),
f2(aval, bval))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
# Test matrix-matrix
amat = matrix()
for axes, shps in [[((0,), (0,)), [(4, 7), (4, 9)]],
[((0,), (1,)), [(4, 7), (9, 4)]],
[((1,), (0,)), [(4, 7), (7, 9)]],
[((1,), (1,)), [(4, 7), (9, 7)]],
[((0, 1), (0, 1)), [(4, 7), (4, 7)]],
# [((0, 1), (1, 0)), [(4, 7), (7, 4)]],
# [((1, 0), (1, 0)), [(4, 7), (4, 7)]],
# [((1, 0), (0, 1)), [(4, 7), (7, 4)]],
]:
c = tensordot(amat, bmat, axes)
f3 = inplace_func([amat, bmat], c)
aval = rand(*shps[0])
bval = rand(*shps[1])
utt.assert_allclose(numpy.tensordot(aval, bval, axes),
f3(aval, bval))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
# Test ndarray-matrix, sum over one dim of matrix
for axes, shps in [[((2,), (1,)), [(1, 2, 3, 4), (2, 3)]],
[((0,), (1,)), [(1, 2, 3, 4), (3, 1)]],
[((0,), (0,)), [(1, 2, 3, 4), (1, 3)]],
[((3,), (0,)), [(1, 2, 3, 4), (4, 1)]],
# [((3, 1), (0, 1)), [(1, 2, 3, 4), (4, 2)]],
# [((0, 1), (1, 0)), [(1, 2, 3, 4), (2, 1)]],
# [((3, 1), (1, 0)), [(1, 2, 3, 4), (2, 4)]],
]:
atens = tensor4()
c = tensordot(atens, bmat, axes)
f4 = inplace_func([atens, bmat], c)
aval = rand(*shps[0])
bval = rand(*shps[1])
utt.assert_allclose(numpy.tensordot(aval, bval, axes),
f4(aval, bval))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
# Test ndarray-ndarray
atens = tensor4()
btens = tensor3()
axes = ((1, 3), (0, 2))
c = tensordot(atens, btens, axes)
f5 = inplace_func([atens, btens], c)
aval = rand(4, 3, 5, 2)
bval = rand(3, 4, 2)
utt.assert_allclose(numpy.tensordot(aval, bval, axes),
f5(aval, bval))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
axes = (axes[1], axes[0])
c = tensordot(btens, atens, axes)
f6 = inplace_func([btens, atens], c)
utt.assert_allclose(numpy.tensordot(bval, aval, axes),
f6(bval, aval))
utt.verify_grad(self.TensorDot(axes), [bval, aval])
def test_raise_error(self):
amat = matrix()
bmat = matrix()
bvec = vector()
# Test invalid length for axes
self.assertRaises(ValueError, tensordot, amat, bmat, (0, 1, 2))
# Test axes of uneven length
self.assertRaises(ValueError, tensordot, amat, bmat, ((0, 1), (0)))
# Test invalid len(axes) given inputs are matrices
self.assertRaises(ValueError, tensordot, amat, bmat, ((0, 1, 2), (0, 1, 2)))
# Test invalid axes[1] given that y is a vector
self.assertRaises(ValueError, tensordot, amat, bvec, (0, 1))
# Test invalid scalar axes given inputs are matrices
self.assertRaises(ValueError, tensordot, amat, bvec, 2)
def test_weird_valid_axes(self):
# Test matrix-matrix
amat = matrix()
bmat = matrix()
for axes in [0,
(1, 0),
[1, 0],
(1, (0, )),
((1, ), 0),
([1], [0]),
([], [])]:
c = tensordot(amat, bmat, axes)
f3 = inplace_func([amat, bmat], c)
aval = rand(4, 7)
bval = rand(7, 9)
self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
f3(aval, bval)))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
def test_scalar_axes(self):
# Test matrix-matrix
amat = fmatrix()
bmat = dmatrix()
# We leave bmat as float64 to test a mix of float32 and float64.
axes = 1
aval = rand(4, 5).astype('float32')
bval = rand(5, 3)
c = tensordot(amat, bmat, axes)
f3 = inplace_func([amat, bmat], c)
self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
f3(aval, bval)))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
# Test tensor-tensor
amat = tensor3()
bmat = tensor3()
axes = 2
aval = rand(3, 4, 5)
bval = rand(4, 5, 3)
c = tensordot(amat, bmat, axes)
f3 = inplace_func([amat, bmat], c)
self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
f3(aval, bval)))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
def test_scalar0(self):
# Test tensor-tensor
amat = matrix()
bmat = matrix()
axes = 0
aval = rand(4, 5)
bval = rand(5, 4)
c = tensordot(amat, bmat, axes)
f3 = inplace_func([amat, bmat], c)
self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
f3(aval, bval)))
utt.verify_grad(self.TensorDot(axes), [aval, bval])
def test_broadcastable1(self):
x = TensorType(dtype=floatX, broadcastable=(True, False, False))('x')
y = tensor3('y')
z = tensordot(x, y)
assert z.broadcastable == (True, False)
f = inplace_func([x, y], z)
xv = rand(1, 3, 4)
yv = rand(3, 4, 5)
zv = f(xv, yv)
self.assertTrue(numpy.allclose(numpy.tensordot(xv, yv), zv))
def test_broadcastable2(self):
x = TensorType(dtype=floatX, broadcastable=(True, False, False))('x')
y = tensor3('y')
axes = [[2, 1], [0, 1]]
z = tensordot(x, y, axes=axes)
assert z.broadcastable == (True, False)
f = inplace_func([x, y], z)
xv = rand(1, 3, 4)
yv = rand(4, 3, 5)
zv = f(xv, yv)
self.assertTrue(numpy.allclose(numpy.tensordot(xv, yv, axes=axes), zv))
def test_smallest_stack():
sx, sy = dscalar(), dscalar()
rval = inplace_func([sx, sy], stack([sx, sy]))(-4.0, -2.0)
assert type(rval) == numpy.ndarray
assert [-4, -2] == list(rval)
def test_smallest():
x = dvector()
y = dvector()
z = dvector()
f1 = inplace_func([x], smallest(x))
assert numpy.all([1, 2, 3] == f1([1, 2, 3]))
f3 = inplace_func([x, y, z], smallest(x, y, z))
assert numpy.all([1, 2, 3] == f3([1, 3, 9], [7, 7, 7], [8, 2, 3]))
sx, sy = dscalar(), dscalar()
assert -4 == inplace_func([sx, sy], smallest(sx, sy))(-4.0, -2.0)
def test_reshape_member_fn():
x = dmatrix()
y = x.reshape((4, 5, 6))
assert y.owner.op == Reshape(3)
def test_var():
a = Tensor(dtype='float64', broadcastable=[False, False, False])()
f = function([a], var(a))
a_val = numpy.arange(60).reshape(3, 4, 5)
assert numpy.allclose(numpy.var(a_val), f(a_val))
f = function([a], var(a, axis=0))
assert numpy.allclose(numpy.var(a_val, axis=0), f(a_val))
f = function([a], var(a, axis=1))
assert numpy.allclose(numpy.var(a_val, axis=1), f(a_val))
f = function([a], var(a, axis=2))
assert numpy.allclose(numpy.var(a_val, axis=2), f(a_val))
f = function([a], var(a, axis=0, ddof=0))
assert numpy.allclose(numpy.var(a_val, axis=0, ddof=0), f(a_val))
f = function([a], var(a, axis=1, ddof=1))
assert numpy.allclose(numpy.var(a_val, axis=1, ddof=1), f(a_val))
f = function([a], var(a, axis=2, ddof=1))
assert numpy.allclose(numpy.var(a_val, axis=2, ddof=1), f(a_val))
f = function([a], var(a, ddof=0, corrected=True))
mean_a = numpy.mean(a_val)
centered_a = a_val - mean_a
v = numpy.mean(centered_a ** 2)
error = (numpy.mean(centered_a)) ** 2
v = v - error
assert numpy.allclose(v, f(a_val))
f = function([a], var(a, axis=2, ddof=1, corrected=True))
mean_a = numpy.mean(a_val, axis=2, keepdims=True)
centered_a = a_val - mean_a
v = numpy.var(a_val, axis=2, ddof=1)
shp_inp = numpy.shape(a_val)
shp = shp_inp - numpy.array(1)
error = (numpy.sum(centered_a, axis=2)) ** 2
error = numpy.true_divide(error, shp[1] * shp_inp[1])
v = v - error
assert numpy.allclose(v, f(a_val))
class T_sum(unittest.TestCase):
def test_sum_overflow(self):
"""Ensure that overflow errors are a little bit harder to get"""
a = Tensor(dtype='int8', broadcastable=[False])()
f = function([a], sum(a))
assert f([1] * 300) == 300
def test_list(self):
ll = [theano.shared(0.), theano.shared(2.)]
assert tensor.sum(ll).eval() == 2
@dec.skipif(
isinstance(get_default_mode(), theano.compile.debugmode.DebugMode),
("This test fails in DEBUG_MODE, but the generated code is OK. "
"It is actually a problem of DEBUG_MODE, see #626."))
def test_default():
x, y = scalars('xy')
z = default(x, y)
f = function([x, y], z)
assert f(1, 2) == 1
assert f(None, 2) == 2
assert f(1, None) == 1
@dec.skipif(
isinstance(get_default_mode(), theano.compile.debugmode.DebugMode),
("This test fails in DEBUG_MODE, but the generated code is OK. "
"It is actually a problem of DEBUG_MODE, see #626."))
def test_default_state():
x, y = scalars('xy')
# print config.floatX
# print x.type
# print y.type
z = default(x, 3.8)
new_x = y + z
f = function([y, compile.In(x, update=new_x, value=12.0)], new_x)
assert f(3) == 15
f['x'] = None
assert numpy.allclose(f(1), 4.8)
assert numpy.allclose(f(numpy.asarray(2.2, dtype=config.floatX)), 7)
def test_autocast():
backup_config = config.cast_policy
# Call test functions for all possible values of `config.cast_policy`.
for autocast_cfg in (
'custom',
#'numpy', # Commented out until it is implemented properly.
'numpy+floatX',
):
config.cast_policy = autocast_cfg
try:
eval('_test_autocast_' + autocast_cfg.replace('+', '_'))()
finally:
config.cast_policy = backup_config
def _test_autocast_custom():
"""Called from `test_autocast`."""
assert config.cast_policy == 'custom'
orig_autocast = autocast_float.dtypes
# Test that autocast_float_as sets the autocast dtype correctly
with autocast_float_as('float32'):
assert autocast_float.dtypes == ('float32',)
assert autocast_float.dtypes == orig_autocast
with autocast_float_as('float64'):
assert autocast_float.dtypes == ('float64',)
assert autocast_float.dtypes == orig_autocast
# Test that we can set it back to something, and nest it
with autocast_float_as('float32'):
assert autocast_float.dtypes == ('float32',)
with autocast_float_as('float64'):
assert autocast_float.dtypes == ('float64',)
assert autocast_float.dtypes == ('float32',)
assert autocast_float.dtypes == orig_autocast
# Test that the autocasting dtype is used correctly in expression-building
with autocast_float_as('float32'):
assert (dvector() + 1.1).dtype == 'float64'
assert (fvector() + 1.1).dtype == 'float32'
assert (fvector() + theano._asarray(1.1, dtype='float64')).dtype == \
'float64'
assert (fvector() + theano._asarray(1.1, dtype='float32')).dtype == \
'float32'
assert (dvector() + 1).dtype == 'float64'
assert (fvector() + 1).dtype == 'float32'
# Test that the autocasting dtype is used correctly in expression-building
with autocast_float_as('float64'):
assert (dvector() + 1.1).dtype == 'float64'
assert (fvector() + 1.1).dtype == 'float64'
assert (fvector() + 1.0).dtype == 'float64'
assert (fvector() + theano._asarray(1.1, dtype='float64')).dtype == \
'float64'
assert (fvector() + theano._asarray(1.1, dtype='float32')).dtype == \
'float32'
assert (dvector() + 1).dtype == 'float64'
assert (fvector() + 1).dtype == 'float32'
# Test that the autocasting dtype is used correctly in expression-building
with autocast_float_as('float32', 'float64'):
assert (dvector() + 1.1).dtype == 'float64'
assert (fvector() + 1.1).dtype == theano.config.floatX
assert (fvector() + 1.0).dtype == 'float32'
assert (dvector() + numpy.float32(1.1)).dtype == 'float64'
assert (dvector() + numpy.float64(1.1)).dtype == 'float64'
assert (dvector() + numpy.float(1.1)).dtype == 'float64'
assert (fvector() + numpy.float32(1.1)).dtype == 'float32'
assert (fvector() + numpy.float64(1.1)).dtype == 'float64'
assert (fvector() + numpy.float(1.1)).dtype == theano.config.floatX
assert (lvector() + numpy.int64(1)).dtype == 'int64'
assert (lvector() + numpy.int32(1)).dtype == 'int64'
assert (lvector() + numpy.int16(1)).dtype == 'int64'
assert (lvector() + numpy.int8(1)).dtype == 'int64'
assert (ivector() + numpy.int8(1)).dtype == 'int32'
assert (wvector() + numpy.int8(1)).dtype == 'int16'
assert (bvector() + numpy.int8(1)).dtype == 'int8'
with autocast_float_as('float64'):
assert (fvector() + 1.0).dtype == 'float64'
def _test_autocast_numpy():
"""Called from `test_autocast`."""
assert config.cast_policy == 'numpy'
# Go through some typical scalar values.
def ok(z):
assert tensor.constant(z).dtype == numpy.asarray(z).dtype
for x in ([2 ** i for i in xrange(63)] +
[0, L(0), L(1), L(2 ** 63 - 1)] +
[0., 1., 1.1, 1.5]):
n_x = numpy.asarray(x)
# Make sure the data type is the same as the one found by numpy.
ok(x)
ok(-x)
ok(x - 1)
ok(-x + 1)
ok(n_x)
def _test_autocast_numpy_floatX():
"""Called from `test_autocast`."""
assert config.cast_policy == 'numpy+floatX'
backup_floatX = config.floatX
def ok(z, floatX):
if (isinstance(z, float) and
floatX == 'float32' and
not hasattr(z, 'dtype')):
# Special case where we use 'float32' instead of 'float64'.
assert tensor.constant(z).dtype == 'float32'
else:
assert tensor.constant(z).dtype == numpy.asarray(z).dtype
try:
# Test with various values of `config.floatX`.
for floatX in ('float32', 'float64'):
config.floatX = floatX
# Go through some typical scalar values.
# We only consider 'int' and 'long' Python values that can fit
# into int64, as that is the maximal integer type that Theano
# supports, and that is the maximal type in Python indexing.
for x in ([2 ** i - 1 for i in xrange(64)] +
[0, L(0), L(1), L(2 ** 63 - 1)] +
[0., 1., 1.1, 1.5]):
ok(x, floatX)
ok(-x, floatX)
ok(x - 1, floatX)
ok(-x + 1, floatX)
ok(numpy.asarray(x), floatX)
ok(numpy.float64(x), floatX)
finally:
config.floatX = backup_floatX
class test_arithmetic_cast(unittest.TestCase):
"""
Test output types of basic arithmetic operations (* / + - //).
We only test the behavior for `config.cast_policy` set to either 'numpy' or
'numpy+floatX': the 'custom' behavior is (at least partially) tested in
`_test_autocast_custom`.
"""
def test_arithmetic_cast(self):
backup_config = config.cast_policy
dtypes = get_numeric_types(with_complex=True)
# Here:
# scalar == scalar stored as a 0d array
# array == 1d array
# i_scalar == scalar type used internally by Theano
theano_scalar = lambda dtype: tensor.scalar(dtype=str(dtype))
numpy_scalar = lambda dtype: numpy.array(1, dtype=dtype)
theano_array = lambda dtype: tensor.vector(dtype=str(dtype))
numpy_array = lambda dtype: numpy.array([1], dtype=dtype)
theano_i_scalar = lambda dtype: theano.scalar.Scalar(str(dtype))()
numpy_i_scalar = numpy_scalar
if config.int_division == 'int':
# Avoid deprecation warning during tests.
warnings.filterwarnings('ignore', message='Division of two integer',
category=DeprecationWarning)
try:
for cfg in ('numpy+floatX', ): # Used to test 'numpy' as well.
config.cast_policy = cfg
for op in (operator.add, operator.sub, operator.mul,
operator_div, operator.floordiv):
for a_type in dtypes:
for b_type in dtypes:
# Note that we do not test division between
# integers if it is forbidden.
# Theano deals with integer division in its own
# special way (depending on `config.int_division`).
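# Rough summary of the `config.int_division` modes exercised below (an
# illustration, not an exhaustive spec): 'raise' makes int/int raise an
# IntegerDivisionError, 'floatX' makes int/int return a result of dtype
# config.floatX, and 'int' keeps the old integer-division behavior (the
# associated deprecation warning is silenced above).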
is_int_division = (
op is operator_div and
a_type in tensor.discrete_dtypes and
b_type in tensor.discrete_dtypes)
# We will test all meaningful combinations of
# scalar and array operations.
for combo in (
('scalar', 'scalar'),
('array', 'array'),
('scalar', 'array'),
('array', 'scalar'),
('i_scalar', 'i_scalar'),
):
theano_args = list(map(eval,
['theano_%s' % c for c in combo]))
numpy_args = list(map(eval,
['numpy_%s' % c for c in combo]))
try:
theano_dtype = op(
theano_args[0](a_type),
theano_args[1](b_type)).type.dtype
# Should have crashed if it is an integer
# division and `config.int_division` does
# not allow it.
assert not (is_int_division and
config.int_division == 'raise')
except theano.scalar.IntegerDivisionError:
assert (is_int_division and
config.int_division == 'raise')
# This is the expected behavior.
continue
# For numpy we have a problem:
# http://projects.scipy.org/numpy/ticket/1827
# As a result we only consider the highest data
# type that numpy may return.
numpy_dtypes = [
op(numpy_args[0](a_type),
numpy_args[1](b_type)).dtype,
op(numpy_args[1](b_type),
numpy_args[0](a_type)).dtype]
numpy_dtype = theano.scalar.upcast(
*list(map(str, numpy_dtypes)))
if numpy_dtype == theano_dtype:
# Same data type found, all is good!
continue
if (cfg == 'numpy+floatX' and
config.floatX == 'float32' and
a_type != 'float64' and
b_type != 'float64' and
numpy_dtype == 'float64'):
# We should keep float32.
assert theano_dtype == 'float32'
continue
if 'array' in combo and 'scalar' in combo:
# For mixed scalar / array operations,
# Theano may differ from numpy as it does
# not try to prevent the scalar from
# upcasting the array.
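# Hypothetical illustration of this difference: adding a float32 array
# and a float64 scalar gives a float64 result in Theano (a plain dtype
# upcast), while numpy's value-based casting of that era could keep the
# float32 array dtype. The branch below accepts exactly that kind of
# discrepancy.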
array_type, scalar_type = (
(a_type, b_type)[
list(combo).index(arg)]
for arg in ('array', 'scalar'))
up_type = theano.scalar.upcast(array_type,
scalar_type)
if (
# The two data types are different.
scalar_type != array_type and
# The array type is not enough to hold
# the scalar type as well.
array_type != up_type and
# Theano upcasted the result array.
theano_dtype == up_type and
# But Numpy kept its original type.
array_type == numpy_dtype):
# Then we accept this difference in
# behavior.
continue
if (is_int_division and
config.int_division == 'floatX'):
assert theano_dtype == config.floatX
continue
if (cfg == 'numpy+floatX' and
a_type == 'complex128' and
(b_type == 'float32' or
b_type == 'float16') and
combo == ('scalar', 'array') and
theano_dtype == 'complex128' and
numpy_dtype == 'complex64'):
# In numpy 1.6.x, adding a complex128 and a
# float32 may result in a complex64. As of
# 1.9.2 this is still the case, so it is
# probably by design.
raise SkipTest("Known issue with "
"numpy, see #761")
# In any other situation: something wrong is
# going on!
assert False
finally:
config.cast_policy = backup_config
if config.int_division == 'int':
# Restore default deprecation warning behavior.
warnings.filterwarnings(
'default',
message='Division of two integer',
category=DeprecationWarning)
class T_long_tensor(unittest.TestCase):
def test_fit_int64(self):
for exp in xrange(theano.configdefaults.python_int_bitwidth()):
val = L(2 ** exp - 1)
scalar_ct = constant(val)
assert scalar_ct.dtype.startswith('int'), (exp, val, scalar_ct.dtype)
assert scalar_ct.value == val
vector_ct = constant([val, val])
assert vector_ct.dtype == 'int64'
assert numpy.all(vector_ct.value == val)
matrix_ct = constant([[val, val]])
assert matrix_ct.dtype == 'int64'
assert numpy.all(matrix_ct.value == val)
def test_too_big(self):
val = L(2 ** 63)
# With NumPy 1.7 this will raise an exception;
# with NumPy 1.7.1 it will work.
try:
cst = constant(val)
assert cst.value == val
assert cst.dtype == "uint64"
except OverflowError:
pass
try:
cst = constant([val, val])
assert cst.value[0] == val
assert cst.value[1] == val
assert cst.value.size == 2
assert cst.dtype == "uint64"
except TypeError:
pass
try:
cst = constant([[val, val]])
assert cst.value[0, 0] == val
assert cst.value[0, 1] == val
assert cst.value.size == 2
assert cst.dtype == "uint64"
except TypeError:
pass
val = L(2 ** 64)
# This fails for all NumPy versions.
self.assertRaises(Exception, constant, val)
self.assertRaises(Exception, constant, [val, val])
self.assertRaises(Exception, constant, [[val, val]])
class test_broadcast(unittest.TestCase):
def test_broadcast_bigdim(self):
def f():
x = matrix()
addbroadcast(x, 2)
self.assertRaises(ValueError, f)
def test_unbroadcast_addbroadcast(self):
"""
Test that unbroadcast does not insert unneeded Rebroadcast ops
and that consecutive Rebroadcast ops are fused.
"""
x = matrix()
assert unbroadcast(x, 0) is x
assert unbroadcast(x, 1) is x
assert unbroadcast(x, 1, 0) is x
assert unbroadcast(x, 0, 1) is x
assert addbroadcast(x, 0) is not x
assert addbroadcast(x, 1) is not x
assert addbroadcast(x, 1, 0).owner.inputs[0] is x
assert unbroadcast(addbroadcast(x, 0), 0) is x
assert addbroadcast(unbroadcast(x, 0), 0) is not x
x = row()
assert unbroadcast(x, 0) is not x
assert unbroadcast(x, 1) is x
assert unbroadcast(x, 1, 0) is not x
assert unbroadcast(x, 0, 1) is not x
assert addbroadcast(x, 0) is x
assert addbroadcast(x, 1).owner.inputs[0] is x
assert addbroadcast(x, 1, 0).owner.inputs[0] is x
assert addbroadcast(x, 0, 1).owner.inputs[0] is x
assert unbroadcast(addbroadcast(x, 1), 1) is x
assert addbroadcast(unbroadcast(x, 1), 1) is not x
# The first unbroadcast removes the broadcastable flag on axis 0, so the
# second one should not add another Rebroadcast.
assert unbroadcast(unbroadcast(x, 0), 0).owner.inputs[0] is x
# Test that consecutive Rebroadcast op are fused
x = TensorType(dtype='float64', broadcastable=(True, True))()
assert unbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x
assert addbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x
assert addbroadcast(unbroadcast(x, 0), 0) is x
def test_patternbroadcast(self):
# Test that patternbroadcast with an empty broadcasting pattern works
x = scalar('x')
m = tensor.matrix('m')
s = patternbroadcast(m, x.broadcastable)
assert s is m
x2 = patternbroadcast(x, x.broadcastable)
assert x2 is x
def test_infer_shape(self):
x = matrix()
y = addbroadcast(x, 0)
f = theano.function([x], y.shape)
assert (f(numpy.zeros((1, 5), dtype=config.floatX)) == [1, 5]).all()
topo = f.maker.fgraph.toposort()
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 2
assert isinstance(topo[0].op, opt.Shape_i)
assert isinstance(topo[1].op, opt.MakeVector)
x = matrix()
y = unbroadcast(x, 0)
f = theano.function([x], y.shape)
assert (f(numpy.zeros((2, 5), dtype=config.floatX)) == [2, 5]).all()
topo = f.maker.fgraph.toposort()
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 3
assert isinstance(topo[0].op, opt.Shape_i)
assert isinstance(topo[1].op, opt.Shape_i)
assert isinstance(topo[2].op, opt.MakeVector)
x = row()
y = unbroadcast(x, 0)
f = theano.function([x], y.shape)
assert (f(numpy.zeros((1, 5), dtype=config.floatX)) == [1, 5]).all()
topo = f.maker.fgraph.toposort()
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 2
assert isinstance(topo[0].op, opt.Shape_i)
assert isinstance(topo[1].op, opt.MakeVector)
def test_len():
for shape in [(5,), (3, 4), (7, 4, 6)]:
x = tensor.tensor(dtype='floatX', broadcastable=(False,) * len(shape))
try:
len(x)
assert False, "Expected an error"
except TypeError:
pass
def test_mod():
"""
We add this test because not all languages and C implementations give the
same sign to the result of a modulo. This checks that the c_code of `Mod`
behaves like Python's `%`, which is what we want.
"""
x, y = fscalars('xy')
fn = gof.DualLinker().accept(
gof.FunctionGraph([x, y], [x % y])).make_function()
for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),
(1, 2), (-1, 2), (1, -2), (-1, -2),
(5, 3), (-5, 3), (5, -3), (-5, -3)
):
assert fn(a, b) == a % b, (a,)
def test_divmod():
"""
Confirm that divmod is equivalent to the python version.
"""
x, y = fscalars('xy')
d, r = divmod(x, y)
fn = gof.DualLinker().accept(
gof.FunctionGraph([x, y], [d, r])).make_function()
for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),
(1, 2), (-1, 2), (1, -2), (-1, -2),
(5, 3), (-5, 3), (5, -3), (-5, -3)
):
d_v, r_v = fn(a, b)
d_vp, r_vp = divmod(a, b)
assert d_v == d_vp and r_v == r_vp, (a,)
def test_mod_compile():
"""
This test generates an Elemwise of Composite such as:
Elemwise{
Composite{
Composite{
Composite{
Composite{mod,EQ},
Switch},
mul},
add}}
The generated c_code did not compile as of 30 June 2010; the
compilation was fixed in the same commit.
"""
x = tensor.vector()
y = tensor.vector()
shape = x.shape
out = tensor.switch(tensor.eq(3 % x.shape[0], 0), y, y[:-1])
f = theano.function([x, y], out)
def test_unalign():
if config.floatX == 'float64':
dtype = "b1,f8"
else:
dtype = "b1,f4"
a = numpy.empty(10000, dtype=dtype)['f1']
b = numpy.empty(10000, dtype=dtype)['f1']
assert not a.flags.aligned
assert not b.flags.aligned
a[:] = rand(len(a))
b[:] = rand(len(b))
out_numpy = 2 * a + 3 * b
av, bv = tensor.vectors('ab')
f = theano.function([av, bv], 2 * av + 3 * bv)
f.maker.fgraph.toposort()
try:
out_theano = f(a, b)
assert not a.flags.aligned
assert not b.flags.aligned
assert numpy.allclose(out_numpy, out_theano)
assert False
except TypeError as e:
pass
a = numpy.empty((), dtype=dtype)['f1']
b = numpy.empty((), dtype=dtype)['f1']
assert not a.flags.aligned
assert not b.flags.aligned
out_numpy = 2 * a + 3 * b
av, bv = tensor.scalars('ab')
f = theano.function([av, bv], 2 * av + 3 * bv)
f.maker.fgraph.toposort()
try:
out_theano = f(a, b)
assert not a.flags.aligned
assert not b.flags.aligned
assert numpy.allclose(out_numpy, out_theano)
assert False
except TypeError as e:
pass
def test_dimshuffle_duplicate():
x = tensor.vector()
success = False
try:
y = tensor.DimShuffle((False, ), (0, 0))(x)
except ValueError as e:
assert str(e).find("may not appear twice") != -1
success = True
assert success
class T_get_scalar_constant_value(unittest.TestCase):
def test_get_scalar_constant_value(self):
a = tensor.stack([1, 2, 3])
assert get_scalar_constant_value(a[0]) == 1
assert get_scalar_constant_value(a[1]) == 2
assert get_scalar_constant_value(a[2]) == 3
b = tensor.iscalar()
a = tensor.stack([b, 2, 3])
self.assertRaises(tensor.basic.NotScalarConstantError, get_scalar_constant_value, a[0])
assert get_scalar_constant_value(a[1]) == 2
assert get_scalar_constant_value(a[2]) == 3
# For now get_scalar_constant_value goes through only MakeVector and Join of
# scalars.
v = tensor.ivector()
a = tensor.stack([v, [2], [3]])
self.assertRaises(tensor.NotScalarConstantError, get_scalar_constant_value, a[0])
self.assertRaises(tensor.NotScalarConstantError, get_scalar_constant_value, a[1])
self.assertRaises(tensor.NotScalarConstantError, get_scalar_constant_value, a[2])
# Test the case SubTensor(Shape(v)) when the dimension
# is broadcastable.
v = tensor.row()
assert get_scalar_constant_value(v.shape[0]) == 1
def test_subtensor_of_constant(self):
c = constant(rand(5))
for i in range(c.value.shape[0]):
assert get_scalar_constant_value(c[i]) == c.value[i]
c = constant(rand(5, 5))
for i in range(c.value.shape[0]):
for j in range(c.value.shape[1]):
assert get_scalar_constant_value(c[i, j]) == c.value[i, j]
def test_numpy_array(self):
# Regression test for crash when called on a numpy array.
assert get_scalar_constant_value(numpy.array(3)) == 3
self.assertRaises(
tensor.NotScalarConstantError,
get_scalar_constant_value,
numpy.array([0, 1]))
self.assertRaises(
tensor.EmptyConstantError,
get_scalar_constant_value,
numpy.array([]))
def test_make_vector(self):
mv = opt.make_vector(1, 2, 3)
self.assertRaises(
tensor.NotScalarConstantError,
get_scalar_constant_value,
mv)
assert get_scalar_constant_value(mv[0]) == 1
assert get_scalar_constant_value(mv[1]) == 2
assert get_scalar_constant_value(mv[2]) == 3
assert get_scalar_constant_value(mv[numpy.int32(0)]) == 1
assert get_scalar_constant_value(mv[numpy.int64(1)]) == 2
assert get_scalar_constant_value(mv[numpy.uint(2)]) == 3
t = theano.scalar.Scalar('int64')
self.assertRaises(
tensor.NotScalarConstantError,
get_scalar_constant_value,
mv[t()])
def test_shape_i(self):
c = theano.tensor.constant(numpy.random.rand(3, 4))
s = opt.Shape_i(0)(c)
assert get_scalar_constant_value(s) == 3
s = opt.Shape_i(1)(c)
assert get_scalar_constant_value(s) == 4
d = theano.shared(numpy.random.randn(1,1), broadcastable=(True, True))
f = theano.tensor.basic.ScalarFromTensor()(opt.Shape_i(0)(d))
assert get_scalar_constant_value(f) == 1
def test_elemwise(self):
# We test only a few elemwise ops; the list of all supported
# elemwise ops is in the function itself.
c = theano.tensor.constant(numpy.random.rand())
s = c + 1
assert numpy.allclose(get_scalar_constant_value(s), c.data + 1)
s = c - 1
assert numpy.allclose(get_scalar_constant_value(s), c.data - 1)
s = c * 1.2
assert numpy.allclose(get_scalar_constant_value(s), c.data * 1.2)
s = c < 0.5
assert numpy.allclose(get_scalar_constant_value(s), int(c.data < 0.5))
s = tensor.second(c, .4)
assert numpy.allclose(get_scalar_constant_value(s), .4)
def test_second(self):
# Second should apply when the value is constant but not the shape
c = theano.tensor.constant(numpy.random.rand())
shp = theano.tensor.vector()
s = theano.tensor.second(shp, c)
assert get_scalar_constant_value(s) == c.data
def test_copy(self):
# Make sure we do not return the internal storage of a constant,
# so we cannot change the value of a constant by mistake.
c = theano.tensor.constant(3)
d = extract_constant(c)
d += 1
e = extract_constant(c)
self.assertTrue(e == 3, (c, d, e))
class T_as_tensor_variable(unittest.TestCase):
"""
We test that ticket #649 stays fixed.
We should not allow as_tensor_variable to accept True or False,
but it should upcast an ndarray of bool to uint8.
"""
def test_bool(self):
self.assertRaises(TypeError, as_tensor_variable, True)
self.assertRaises(TypeError, as_tensor_variable, False)
def test_ndarray_bool(self):
ten = as_tensor_variable(numpy.array([True, False, False, True, True]))
assert ten.type.dtype == 'uint8'
def test_memmap(self):
inp = numpy.random.rand(4, 3)
f, fname = mkstemp()
new_inp = numpy.memmap(fname, dtype=inp.dtype,
mode='w+', shape=inp.shape)
new_inp[...] = inp
x = as_tensor_variable(new_inp)
def test_empty_dtype(self):
old = theano.config.floatX
for dtype in ['float16', 'float32', 'float64']:
try:
theano.config.floatX = dtype
assert theano.tensor.as_tensor_variable(()).dtype == dtype
assert theano.tensor.as_tensor_variable([]).dtype == dtype
finally:
theano.config.floatX = old
class test_complex_mod(unittest.TestCase):
"""Make sure % fails on complex numbers."""
def test_fail(self):
x = vector(dtype='complex64')
try:
x % 5
assert False
except theano.scalar.ComplexError:
pass
class test_size(unittest.TestCase):
"""
Ensure the `size` attribute of tensors behaves as in numpy.
"""
def test_matrix(self):
x = tensor.matrix()
y = numpy.zeros((5, 7), dtype=config.floatX)
assert y.size == function([x], x.size)(y)
def test_vector(self):
x = tensor.vector()
y = numpy.zeros(7, dtype=config.floatX)
assert y.size == function([x], x.size)(y)
def test_scalar(self):
x = tensor.scalar()
y = numpy.array(7, dtype=config.floatX)
assert y.size == function([x], x.size)(y)
def test_shared(self):
# NB: we also test higher order tensors at the same time.
y = numpy.zeros((1, 2, 3, 4), dtype=config.floatX)
x = theano.shared(y)
assert y.size == function([], x.size)()
class test_numpy_assumptions(unittest.TestCase):
"""
Verify that some assumptions Theano makes on Numpy's behavior still hold.
"""
def test_ndarray_copy(self):
"""
A copy or deepcopy of the ndarray type should not create a new object.
This is because Theano makes some comparisons of the form:
if type(x) is numpy.ndarray
"""
assert copy(numpy.ndarray) is numpy.ndarray
assert deepcopy(numpy.ndarray) is numpy.ndarray
def test_dtype_equality(self):
"""
Ensure dtype string comparisons are consistent.
Theano often uses string representations of dtypes (e.g. 'float32'). We
need to make sure that comparing the string representations is the same
as comparing the dtype objects themselves.
"""
dtypes = get_numeric_types(with_complex=True)
# Perform all pairwise comparisons of dtypes, making sure comparing
# their string representation yields the same result.
for dtype1_idx, dtype1 in enumerate(dtypes):
for dtype2 in dtypes[dtype1_idx + 1:]:
assert (dtype1 == dtype2) == (str(dtype1) == str(dtype2))
def test_transpose():
x1 = tensor.dvector('x1')
x2 = tensor.dmatrix('x2')
x3 = tensor.dtensor3('x3')
x1v = numpy.arange(24)
x2v = numpy.arange(24).reshape(2, 12)
x3v = numpy.arange(24).reshape(2, 3, 4)
f = theano.function([x1, x2, x3], [
tensor.transpose(x1),
tensor.transpose(x2),
tensor.transpose(x3),
x1.transpose(),
x2.transpose(),
x3.transpose(),
x2.transpose(0, 1),
x3.transpose((0, 2, 1)),
tensor.transpose(x2, [0, 1]),
tensor.transpose(x3, [0, 2, 1]),
])
t1, t2, t3, t1b, t2b, t3b, t2c, t3c, t2d, t3d = f(x1v, x2v, x3v)
assert t1.shape == numpy.transpose(x1v).shape
assert t2.shape == numpy.transpose(x2v).shape
assert t3.shape == numpy.transpose(x3v).shape
assert numpy.all(t1 == numpy.transpose(x1v))
assert numpy.all(t2 == numpy.transpose(x2v))
assert numpy.all(t3 == numpy.transpose(x3v))
assert numpy.all(t1b == x1v.transpose())
assert numpy.all(t2b == x2v.transpose())
assert numpy.all(t3b == x3v.transpose())
assert t2c.shape == (2, 12)
assert t3c.shape == (2, 4, 3)
assert numpy.all(t2c == x2v.transpose([0, 1]))
assert numpy.all(t3c == x3v.transpose([0, 2, 1]))
assert t2d.shape == (2, 12)
assert t3d.shape == (2, 4, 3)
assert numpy.all(t2d == numpy.transpose(x2v, [0, 1]))
assert numpy.all(t3d == numpy.transpose(x3v, [0, 2, 1]))
# Check that we create a name.
assert tensor.transpose(x1).name == 'x1.T'
assert tensor.transpose(x2).name == 'x2.T'
assert tensor.transpose(x3).name == 'x3.T'
assert tensor.transpose(tensor.dmatrix()).name is None
def test_stacklists():
a, b, c, d = map(scalar, 'abcd')
X = stacklists([[a, b],
[c, d]])
f = function([a, b, c, d], X)
result = f(1, 2, 3, 4)
assert result.shape == (2, 2)
assert numpy.allclose(f(1, 2, 3, 4), numpy.asarray([[1, 2], [3, 4]]))
X = stacklists([a, b, c, d])
f = function([a, b, c, d], X)
result = f(1, 2, 3, 4)
assert result.shape == (4,)
assert numpy.allclose(f(1, 2, 3, 4), numpy.asarray([[1, 2, 3, 4]]))
X = stacklists([[[a], [b]], [[c], [d]]])
f = function([a, b, c, d], X)
result = f(1, 2, 3, 4)
assert result.shape == (2, 2, 1)
a, b, c, d = [matrix(a) for a in 'abcd']
X = stacklists([[a, b],
[c, d]])
f = function([a, b, c, d], X)
x = numpy.ones((4, 4), 'float32')
assert f(x, x, x, x).shape == (2, 2, 4, 4)
class TestSpecifyShape(unittest.TestCase):
mode = None
input_type = TensorType
def shortDescription(self):
return None
def test_bad_shape(self):
""" Test that at run time we raise an exception when the shape
is not the one specified"""
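# SpecifyShape(x, shape) passes x through unchanged but checks the
# concrete shape at run time; as the calls below illustrate, a matching
# input is accepted while a mismatch raises an AssertionError.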
specify_shape = SpecifyShape()
x = vector()
xval = numpy.random.rand(2).astype(floatX)
f = theano.function([x], specify_shape(x, [2]), mode=self.mode)
f(xval)
xval = numpy.random.rand(3).astype(floatX)
self.assertRaises(AssertionError, f, xval)
theano.printing.debugprint(f)
assert isinstance([n for n in f.maker.fgraph.toposort()
if isinstance(n.op, SpecifyShape)][0].inputs[0].type,
self.input_type)
x = matrix()
xval = numpy.random.rand(2, 3).astype(floatX)
f = theano.function([x], specify_shape(x, [2, 3]), mode=self.mode)
assert isinstance([n for n in f.maker.fgraph.toposort()
if isinstance(n.op, SpecifyShape)][0].inputs[0].type,
self.input_type)
f(xval)
for shape in [(1, 3), (2, 2), (5, 5)]:
xval = numpy.random.rand(*shape).astype(floatX)
self.assertRaises(AssertionError, f, xval)
def test_bad_number_of_shape(self):
""" Test that the number of dimensions provided is good"""
specify_shape = SpecifyShape()
x = vector()
shape_vec = ivector()
xval = numpy.random.rand(2).astype(floatX)
self.assertRaises(AssertionError, specify_shape, x, [])
self.assertRaises(AssertionError, specify_shape, x, [2, 2])
f = theano.function([x, shape_vec], specify_shape(x, shape_vec),
mode=self.mode)
assert isinstance([n for n in f.maker.fgraph.toposort()
if isinstance(n.op, SpecifyShape)][0].inputs[0].type,
self.input_type)
self.assertRaises(AssertionError, f, xval, [])
self.assertRaises(AssertionError, f, xval, [2, 2])
x = matrix()
xval = numpy.random.rand(2, 3).astype(floatX)
for shape in [(),
(1,),
(2, 3, 4)]:
self.assertRaises(AssertionError, specify_shape, x, shape)
f = theano.function([x, shape_vec], specify_shape(x, shape_vec),
mode=self.mode)
assert isinstance([n for n in f.maker.fgraph.toposort()
if isinstance(n.op, SpecifyShape)][0].inputs[0].type,
self.input_type)
self.assertRaises(AssertionError, f, xval, shape)
class TestInferShape(utt.InferShapeTester):
def test_infer_shape(self):
# Flatten
atens3 = tensor3()
atens3_val = rand(4, 5, 3)
self._compile_and_check([atens3],
[flatten(atens3, 1)],
[atens3_val], Reshape)
for outdim in (3, 2, 1):
self._compile_and_check([atens3],
[flatten(atens3, outdim)],
[atens3_val], Reshape)
amat = matrix()
amat_val = rand(4, 5)
for outdim in (2, 1):
self._compile_and_check([amat],
[flatten(amat, outdim)],
[amat_val], Reshape)
avec = vector()
avec_val = rand(4)
outdim = 1
self._compile_and_check([avec],
[flatten(avec, outdim)],
[avec_val], Reshape,
excluding=['local_useless_reshape'])
# Eye
aiscal = iscalar()
biscal = iscalar()
ciscal = iscalar()
self._compile_and_check([aiscal, biscal, ciscal],
[Eye()(aiscal, biscal, ciscal)],
[4, 4, 0], Eye)
self._compile_and_check([aiscal, biscal, ciscal],
[Eye()(aiscal, biscal, ciscal)],
[4, 5, 0], Eye)
self._compile_and_check([aiscal, biscal, ciscal],
[Eye()(aiscal, biscal, ciscal)],
[3, 5, 0], Eye)
# Tri
aiscal = iscalar()
biscal = iscalar()
ciscal = iscalar()
self._compile_and_check([aiscal, biscal, ciscal],
[Tri()(aiscal, biscal, ciscal)],
[4, 4, 0], Tri)
self._compile_and_check([aiscal, biscal, ciscal],
[Tri()(aiscal, biscal, ciscal)],
[4, 5, 0], Tri)
self._compile_and_check([aiscal, biscal, ciscal],
[Tri()(aiscal, biscal, ciscal)],
[3, 5, 0], Tri)
# Diagonal
atens3 = tensor3()
atens3_val = rand(4, 5, 3)
atens3_diag = Diagonal()(atens3)
self._compile_and_check([atens3], [atens3_diag],
[atens3_val], Diagonal)
atens3_diag = Diagonal(1)(atens3)
self._compile_and_check([atens3], [atens3_diag],
[atens3_val], Diagonal)
atens3_diag = Diagonal(-1)(atens3)
self._compile_and_check([atens3], [atens3_diag],
[atens3_val], Diagonal)
atens3_diag = Diagonal(1, 0, 2)(atens3)
self._compile_and_check([atens3], [atens3_diag],
[atens3_val], Diagonal)
atens3_diag = Diagonal(1, 1, 2)(atens3)
self._compile_and_check([atens3], [atens3_diag],
[atens3_val], Diagonal)
atens3_diag = Diagonal(1, 2, 0)(atens3)
self._compile_and_check([atens3], [atens3_diag],
[atens3_val], Diagonal)
# Diag
advec = dvector()
advec_val = rand(4)
self._compile_and_check([advec], [Diag()(advec)],
[advec_val], Diag)
# Shape
# 'opt.MakeVector' precludes the optimizer from disentangling
# the elements of the shape
adtens = tensor3()
adtens_val = rand(4, 5, 3)
self._compile_and_check([adtens],
[Shape()(adtens)],
[adtens_val], (opt.MakeVector, Shape))
# Dot
# vec/vec
advec = dvector()
bdvec = dvector()
advec_val = rand(4)
bdvec_val = rand(4)
self._compile_and_check([advec, bdvec],
[Dot()(advec, bdvec)],
[advec_val, bdvec_val],
(Dot, tensor.blas.Dot22,
tensor.blas.Gemv, tensor.blas_c.CGemv))
# mat/mat
admat = dmatrix()
bdmat = dmatrix()
admat_val = rand(4, 5)
bdmat_val = rand(5, 3)
self._compile_and_check([admat, bdmat],
[Dot()(admat, bdmat)],
[admat_val, bdmat_val],
(Dot, tensor.blas.Dot22))
# vec/mat
bdmat_val = rand(4, 5)
self._compile_and_check([advec, bdmat],
[Dot()(advec, bdmat)],
[advec_val, bdmat_val],
(Dot, tensor.blas.Dot22,
tensor.blas.Gemv, tensor.blas_c.CGemv))
# mat/vec
admat_val = rand(5, 4)
self._compile_and_check([admat, bdvec],
[Dot()(admat, bdvec)],
[admat_val, bdvec_val],
(Dot, tensor.blas.Dot22,
tensor.blas.Gemv, tensor.blas_c.CGemv))
# Split
aivec = ivector()
adtens_val = rand(4, 10, 3)
aivec_val = [2, 5, 3]
for aiscal_val in [1, -2]:
self._compile_and_check(
[adtens, aiscal, aivec],
[Split(3)(adtens, aiscal, aivec)[0]],
[adtens_val, aiscal_val, aivec_val], (Split))
# Join
cdmat = dmatrix()
admat_val = rand(1, 3)
bdmat_val = rand(2, 3)
cdmat_val = rand(4, 3)
for aiscal_val in [0, -2]:
self._compile_and_check(
[aiscal, admat, bdmat, cdmat],
[Join()(aiscal, admat, bdmat, cdmat)],
[aiscal_val, admat_val, bdmat_val, cdmat_val], Join)
admat_val = rand(4, 1)
bdmat_val = rand(4, 3)
cdmat_val = rand(4, 2)
for aiscal_val in [-1, 1]:
self._compile_and_check(
[aiscal, admat, bdmat, cdmat],
[Join()(aiscal, admat, bdmat, cdmat)],
[aiscal_val, admat_val, bdmat_val, cdmat_val], Join)
# PermuteRowElements
abool = True
rng = numpy.random.RandomState(utt.fetch_seed())
advec_val = rand(5)
aivec_val = rng.permutation(5).astype('int32')
self._compile_and_check([advec, aivec],
[PermuteRowElements()(advec, aivec, abool)],
[advec_val, aivec_val], PermuteRowElements)
admat_val = rand(3, 5)
self._compile_and_check([admat, aivec],
[PermuteRowElements()(admat, aivec, abool)],
[admat_val, aivec_val], PermuteRowElements)
adtens3 = dtensor3()
adtens3_val = rand(3, 2, 5)
self._compile_and_check([adtens3, aivec],
[PermuteRowElements()(adtens3, aivec, abool)],
[adtens3_val, aivec_val], PermuteRowElements)
aimat = imatrix()
perma = rng.permutation(5).astype('int32')
permb = rng.permutation(5).astype('int32')
permc = rng.permutation(5).astype('int32')
aimat_val = numpy.vstack((perma, permb, permc))
admat_val = rand(3, 5)
self._compile_and_check([admat, aimat],
[PermuteRowElements()(admat, aimat, abool)],
[admat_val, aimat_val], PermuteRowElements)
aitens3 = itensor3()
perma = rng.permutation(5).astype('int32')
permb = rng.permutation(5).astype('int32')
permc = rng.permutation(5).astype('int32')
bimat_val = numpy.vstack((perma, permb, permc))
aitens3_val = numpy.empty((2, 3, 5), 'int32')
aitens3_val[0, ::, ::] = aimat_val
aitens3_val[1, ::, ::] = bimat_val
self._compile_and_check([admat, aitens3],
[PermuteRowElements()(admat, aitens3, abool)],
[admat_val, aitens3_val], PermuteRowElements)
# ScalarFromTensor
aiscal = iscalar()
self._compile_and_check([aiscal],
[TensorFromScalar()(ScalarFromTensor()(aiscal))],
[45], ScalarFromTensor,
excluding=["local_tensor_scalar_tensor"])
# TensorFromScalar
aiscal = scal.float64()
self._compile_and_check([aiscal],
[TensorFromScalar()(aiscal)],
[4.], TensorFromScalar)
# Rebroadcast
adtens4 = dtensor4()
adict = [(0, False), (1, True), (2, False), (3, True)]
adtens4_val = rand(2, 1, 3, 1)
self._compile_and_check([adtens4],
[Rebroadcast(*adict)(adtens4)],
[adtens4_val], Rebroadcast,
warn=False)
adtens4_bro = TensorType('float64', (True, True, True, False))()
bdict = [(0, True), (1, False), (2, False), (3, False)]
adtens4_bro_val = rand(1, 1, 1, 3)
self._compile_and_check([adtens4_bro],
[Rebroadcast(*bdict)(adtens4_bro)],
[adtens4_bro_val], Rebroadcast)
# Alloc
randint = numpy.random.randint
adscal = dscalar()
aiscal = lscalar()
biscal = lscalar()
ciscal = lscalar()
discal = lscalar()
adscal_val = rand()
aiscal_val = randint(3, 6, size=())
biscal_val = randint(3, 6, size=())
ciscal_val = randint(3, 6, size=())
discal_val = randint(3, 6, size=())
self._compile_and_check([adscal, aiscal, biscal, ciscal, discal],
[Alloc()(adscal, aiscal, biscal, ciscal, discal)],
[adscal_val, aiscal_val, biscal_val,
ciscal_val, discal_val], Alloc)
# MaxAndArgmax,
adtens3_val = rand(4, 5, 3)
self._compile_and_check([adtens3],
MaxAndArgmax()(adtens3, None),
[adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3],
MaxAndArgmax()(adtens3, 0),
[adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3],
MaxAndArgmax()(adtens3, 1),
[adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3],
MaxAndArgmax()(adtens3, 2),
[adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3],
MaxAndArgmax()(adtens3, [0, 1, 2]),
[adtens3_val], MaxAndArgmax)
# ARange
self._compile_and_check([aiscal, biscal, ciscal],
[ARange('int64')(aiscal, biscal, ciscal)],
[0, 5, 1], ARange)
self._compile_and_check([aiscal, biscal, ciscal],
[ARange('int64')(aiscal, biscal, ciscal)],
[2, 11, 4], ARange)
self._compile_and_check([aiscal, biscal, ciscal],
[ARange('int64')(aiscal, biscal, ciscal)],
[-5, 1, 1], ARange)
self._compile_and_check([aiscal, biscal, ciscal],
[ARange('int64')(aiscal, biscal, ciscal)],
[10, 2, -2], ARange)
self._compile_and_check([aiscal, biscal, ciscal],
[ARange('int64')(aiscal, biscal, ciscal)],
[10, 2, 2], ARange)
self._compile_and_check([aiscal, biscal, ciscal],
[ARange('int64')(aiscal, biscal, ciscal)],
[0, 0, 1], ARange)
# SpecifyShape
aivec_val = [3, 4, 2, 5]
adtens4_val = rand(*aivec_val)
self._compile_and_check([adtens4, aivec],
[SpecifyShape()(adtens4, aivec)],
[adtens4_val, aivec_val], SpecifyShape)
# Mean
adtens3_val = rand(3, 4, 5)
aiscal_val = 2
self._compile_and_check([adtens3],
[Mean(None)(adtens3)],
[adtens3_val], Mean)
self._compile_and_check([adtens3],
[Mean(aiscal_val)(adtens3)],
[adtens3_val], Mean)
# Reshape
# TODO: generalize infer_shape to account for tensor variable
# (non-constant) input shape
admat = dmatrix()
aivec = ivector()
ndim = 1
admat_val = rand(3, 4)
self._compile_and_check([admat],
[Reshape(ndim)(admat, [12])],
[admat_val], Reshape)
self._compile_and_check([admat],
[Reshape(ndim)(admat, [-1])],
[admat_val], Reshape)
ndim = 2
self._compile_and_check([admat],
[Reshape(ndim)(admat, [4, 3])],
[admat_val], Reshape)
self._compile_and_check([admat],
[Reshape(ndim)(admat, [4, -1])],
[admat_val], Reshape)
self._compile_and_check([admat],
[Reshape(ndim)(admat, [3, -1])],
[admat_val], Reshape)
self._compile_and_check([admat],
[Reshape(ndim)(admat, [-1, 3])],
[admat_val], Reshape)
self._compile_and_check([admat],
[Reshape(ndim)(admat, [-1, 4])],
[admat_val], Reshape)
# enable when infer_shape is generalized:
# self._compile_and_check([admat, aivec],
# [Reshape(ndim)(admat, aivec)],
# [admat_val, [4, 3]], Reshape)
#
# self._compile_and_check([admat, aivec],
# [Reshape(ndim)(admat, aivec)],
# [admat_val, [4, -1]], Reshape)
adtens4 = dtensor4()
ndim = 4
adtens4_val = rand(2, 4, 3, 5)
self._compile_and_check([adtens4],
[Reshape(ndim)(adtens4, [1, -1, 10, 4])],
[adtens4_val], Reshape)
self._compile_and_check([adtens4],
[Reshape(ndim)(adtens4, [1, 3, 10, 4])],
[adtens4_val], Reshape)
# enable when infer_shape is generalized:
# self._compile_and_check([adtens4, aivec],
# [Reshape(ndim)(adtens4, aivec)],
# [adtens4_val, [1, -1, 10, 4]], Reshape)
#
# self._compile_and_check([adtens4, aivec],
# [Reshape(ndim)(adtens4, aivec)],
# [adtens4_val, [1, 3, 10, 4]], Reshape)
# The Tile op is deprecated, so the tile function doesn't use it
# anymore; we test the op directly here.
advec = dvector()
advec_val = rand(5)
aivec_val = [3]
ndim = 1
self._compile_and_check([advec],
[Tile(ndim)(advec, aivec_val)],
[advec_val], Tile)
admat = dmatrix()
admat_val = rand(2, 4)
aivec_val = [2, 3]
ndim = 2
self._compile_and_check([admat],
[Tile(ndim)(admat, aivec_val)],
[admat_val], Tile)
adtens4 = dtensor4()
adtens4_val = rand(2, 4, 3, 5)
aivec_val = [2, 3, 1, 4]
ndim = 4
self._compile_and_check([adtens4],
[Tile(ndim)(adtens4, aivec_val)],
[adtens4_val], Tile)
class TestTensorInstanceMethods(unittest.TestCase):
def setUp(self):
self.vars = matrices('X', 'Y')
self.vals = [m.astype(floatX) for m in [rand(2, 2), rand(2, 2)]]
def test_argmin(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.argmin().eval({X: x}), x.argmin())
def test_argmax(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.argmax().eval({X: x}), x.argmax())
def test_argsort(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.argsort().eval({X: x}), x.argsort())
assert_array_equal(X.argsort(1).eval({X: x}), x.argsort(1))
def test_clip(self):
X, Y = self.vars
x, y = self.vals
# numpy.clip gives unexpected values when min > max,
# so we have to make sure that min <= max in that test,
# otherwise it randomly fails.
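# Minimal illustration of the pitfall (assuming numpy's documented
# minimum(maximum(a, a_min), a_max) ordering): when min > max the result
# simply collapses to the max bound.
assert numpy.clip(0.0, 2.0, 1.0) == 1.0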
Z = X.clip(Y - 0.5, Y + 0.5)
z = x.clip(y - 0.5, y + 0.5)
assert_array_equal(Z.eval({X: x, Y: y}), z)
def test_dot(self):
X, Y = self.vars
x, y = self.vals
# Use allclose comparison as a user reported on the mailing
# list failure otherwise with array that print exactly the same.
assert_allclose(x.dot(y), X.dot(Y).eval({X: x, Y: y}))
Z = X.dot(Y)
z = x.dot(y)
assert_allclose(x.dot(z), X.dot(Z).eval({X: x, Z: z}))
def test_real_imag(self):
X, Y = self.vars
x, y = self.vals
Z = X + Y * 1j
z = x + y * 1j
assert_array_equal(Z.real.eval({Z: z}), x)
assert_array_equal(Z.imag.eval({Z: z}), y)
def test_conj(self):
X, Y = self.vars
x, y = self.vals
Z = X + Y * 1j
z = x + y * 1j
assert_array_equal(Z.conj().eval({Z: z}), z.conj())
assert_array_equal(Z.conjugate().eval({Z: z}), z.conj())
def test_round(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.round().eval({X: x}), x.round())
def test_std(self):
X, _ = self.vars
x, _ = self.vals
# std() is implemented as theano tree and does not pass its
# args directly to numpy. This sometimes results in small
# difference, so we use allclose test.
assert_allclose(X.std().eval({X: x}), x.std())
def test_repeat(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.repeat(2).eval({X: x}), x.repeat(2))
def test_trace(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.trace().eval({X: x}), x.trace())
def test_ravel(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.ravel().eval({X: x}), x.ravel())
def test_diagonal(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.diagonal().eval({X: x}), x.diagonal())
assert_array_equal(X.diagonal(1).eval({X: x}), x.diagonal(1))
assert_array_equal(X.diagonal(-1).eval({X: x}), x.diagonal(-1))
for offset, axis1, axis2 in [(1, 0, 1), (-1, 0, 1), (0, 1, 0), (-2, 1, 0)]:
assert_array_equal(X.diagonal(offset, axis1, axis2).eval({X: x}),
x.diagonal(offset, axis1, axis2))
def test_take(self):
X, _ = self.vars
x, _ = self.vals
indices = [1, 0, 3]
assert_array_equal(X.take(indices).eval({X: x}), x.take(indices))
indices = [1, 0, 1]
assert_array_equal(X.take(indices, 1).eval({X: x}), x.take(indices, 1))
indices = numpy.array([-10, 5, 12], dtype='int32')
assert_array_equal(X.take(indices, 1, mode='wrap').eval({X: x}),
x.take(indices, 1, mode='wrap'))
assert_array_equal(X.take(indices, -1, mode='wrap').eval({X: x}),
x.take(indices, -1, mode='wrap'))
assert_array_equal(X.take(indices, 1, mode='clip').eval({X: x}),
x.take(indices, 1, mode='clip'))
assert_array_equal(X.take(indices, -1, mode='clip').eval({X: x}),
x.take(indices, -1, mode='clip'))
# Test error handling
self.assertRaises(IndexError, X.take(indices).eval, {X: x})
self.assertRaises(IndexError, (2 * X.take(indices)).eval, {X: x})
self.assertRaises(TypeError, X.take, [0.0])
indices = [[1, 0, 1], [0, 1, 1]]
assert_array_equal(X.take(indices, 1).eval({X: x}), x.take(indices, 1))
# Test equivalent advanced indexing
assert_array_equal(X[:, indices].eval({X: x}), x[:, indices])
def test_cumsum(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.cumsum().eval({X: x}), x.cumsum())
def test_cumprod(self):
X, _ = self.vars
x, _ = self.vals
assert_array_equal(X.cumprod().eval({X: x}), x.cumprod())
def test_norm():
x = theano.tensor.vector('x')
n = x.norm(2)
f = theano.function([x], n)
assert numpy.allclose(f([1, 1]), numpy.sqrt(2))
class test_ptp(unittest.TestCase):
def test_scalar(self):
"""
Should return 0 for all scalar
"""
x = scalar('x')
p = ptp(x)
f = theano.function([x], p)
y = numpy.asarray(rand() * 2000 - 1000, dtype=config.floatX)
result = f(y)
numpyResult = numpy.ptp(y)
self.assertTrue(numpy.array_equal(result, numpyResult))
def test_vector(self):
x = vector('x')
p = ptp(x, 0)
f = theano.function([x], p)
y = rand_ranged(-1000, 1000, [100])
result = f(y)
numpyResult = numpy.ptp(y, 0)
self.assertTrue(numpy.array_equal(result, numpyResult))
def test_matrix_first_axis(self):
x = matrix('x')
p = ptp(x, 1)
f = theano.function([x], p)
y = rand_ranged(-1000, 1000, [100, 100])
result = f(y)
numpyResult = numpy.ptp(y, 1)
self.assertTrue(numpy.array_equal(result, numpyResult))
def test_matrix_second_axis(self):
x = matrix('x')
p = ptp(x, 0)
f = theano.function([x], p)
y = rand_ranged(-1000, 1000, [100, 100])
result = f(y)
numpyResult = numpy.ptp(y, 0)
self.assertTrue(numpy.array_equal(result, numpyResult))
def test_matrix_neg_axis(self):
x = matrix('x')
p = ptp(x, -1)
f = theano.function([x], p)
y = rand_ranged(-1000, 1000, [100, 100])
result = f(y)
numpyResult = numpy.ptp(y, -1)
self.assertTrue(numpy.array_equal(result, numpyResult))
def test_matrix_no_axis(self):
x = matrix('x')
p = ptp(x)
f = theano.function([x], p)
y = rand_ranged(-1000, 1000, [100, 100])
result = f(y)
numpyResult = numpy.ptp(y)
self.assertTrue(numpy.array_equal(result, numpyResult))
def test_interface(self):
x = matrix('x')
p = x.ptp(1)
f = theano.function([x], p)
y = rand_ranged(-1000, 1000, [100, 100])
result = f(y)
numpyResult = numpy.ptp(y, 1)
self.assertTrue(numpy.array_equal(result, numpyResult))
if __name__ == '__main__':
t = TestInferShape('setUp')
t.setUp()
t.test_infer_shape()
class T_swapaxes(unittest.TestCase):
def test_no_dimensional_input(self):
self.assertRaises(IndexError, swapaxes, 2, 0, 1)
def test_unidimensional_input(self):
self.assertRaises(IndexError, swapaxes, [2, 1], 0, 1)
def test_not_enough_dimension(self):
self.assertRaises(IndexError, swapaxes, [[2, 1], [3, 4]], 3, 4)
def test_doubleswap(self):
y = matrix()
n = swapaxes(y, 0, 1)
f = function([y], n)
testMatrix = [[2, 1], [3, 4]]
self.assertTrue(numpy.array_equal(testMatrix, f(f(testMatrix))))
def test_interface(self):
x = theano.tensor.matrix()
x.swapaxes(0, 1)
def test_numpy_compare(self):
rng = numpy.random.RandomState(utt.fetch_seed())
A = tensor.matrix("A", dtype=theano.config.floatX)
Q = swapaxes(A, 0, 1)
fn = function([A], [Q])
a = rng.rand(4, 4).astype(theano.config.floatX)
n_s = numpy.swapaxes(a, 0, 1)
t_s = fn(a)
assert numpy.allclose(n_s, t_s)
class T_Power(unittest.TestCase):
def test_numpy_compare(self):
rng = numpy.random.RandomState(utt.fetch_seed())
A = tensor.matrix("A", dtype=theano.config.floatX)
Q = power(A, 3)
fn = function([A], [Q])
a = rng.rand(4, 4).astype(theano.config.floatX)
n_p = numpy.power(a, 3)
t_p = fn(a)
assert numpy.allclose(n_p, t_p)
def test_multiple_power(self):
x = tensor.vector()
y = [1, 2, 3]
z = power(x, y)
f = function([x], z)
assert numpy.allclose(f([1, 2, 3]), [1, 4, 27])
def test_wrong_shape(self):
x = tensor.vector()
y = [1, 2, 3]
z = power(x, y)
f = function([x], z)
self.assertRaises(ValueError, f, [1, 2, 3, 4])
class T_Choose(utt.InferShapeTester):
op = staticmethod(choose)
op_class = Choose
modes = ['raise', 'wrap', 'clip']
def test_numpy_compare(self):
a = tensor.vector(dtype='int32')
b = tensor.matrix(dtype='float32')
A = numpy.random.randint(0, 4, 4).astype('int32')
B = numpy.asarray(numpy.random.rand(4, 4), dtype='float32')
for m in self.modes:
f = function([a, b], choose(a, b, mode=m))
t_c = f(A, B)
n_c = numpy.choose(A, B, mode=m)
assert numpy.allclose(t_c, n_c)
def test_broadcasted(self):
a = tensor.scalar(dtype='int32')
b = tensor.matrix(dtype='float32')
# Test when a is broadcastable
A = 3
B = numpy.asarray(numpy.random.rand(4, 4), dtype='float32')
for m in self.modes:
f = function([a, b], choose(a, b, mode=m))
t_c = f(A, B)
n_c = numpy.choose(A, B, mode=m)
assert numpy.allclose(t_c, n_c)
# Test when the result should be broadcastable
b = theano.tensor.col(dtype='float32')
B = numpy.asarray(numpy.random.rand(4, 1), dtype='float32')
for m in self.modes:
f = function([a, b], choose(a, b, mode=m))
assert choose(a, b, mode=m).broadcastable[0]
t_c = f(A, B)
n_c = numpy.choose(A, B, mode=m)
assert numpy.allclose(t_c, n_c)
def test_dtype_error(self):
a = tensor.scalar(dtype='float32')
b = tensor.matrix(dtype='float32')
A = 3
B = numpy.asarray(numpy.random.rand(4, 4), dtype='float32')
self.assertRaises(TypeError, choose, a, b)
def test_numpy_compare_tuple(self):
a = tensor.tensor3(dtype='int32')
b = tensor.tensor3(dtype='float32')
c = tensor.tensor3(dtype='float32')
A = numpy.random.randint(0, 2, (2, 1, 1)).astype('int32')
B = numpy.asarray(numpy.random.rand(1, 6, 1), dtype='float32')
C = numpy.asarray(numpy.random.rand(1, 1, 5), dtype='float32')
for m in self.modes:
f = function([a, b, c], choose(a, (b, c), mode=m))
t_c = f(A, B, C)
n_c = numpy.choose(A, (B, C), mode=m)
assert numpy.allclose(t_c, n_c)
def test_infer_shape(self):
for shp1, shp2 in [
((5, 4), (7, 4)),
((1, 4), (7, 4)),
((5, 1), (7, 4)),
((5, 4), (1, 4)),
((5, 4), (7, 1)),
((5, 4), (4,)),
((1, 4), (4,)),
((5, 1), (4,)),
((5, 4), (1,)),
((4,), (5, 4)),
((1,), (5, 4)),
((4,), (1, 4)),
((4,), (3, 1)),
((4,), (4,)),
((1,), (4,)),
((4,), (1,)),
((1,), (1,)),
]:
a = tensor.tensor(dtype='int32',
broadcastable=[n == 1 for n in shp1])
c = tensor.tensor(dtype='float32',
broadcastable=[n == 1 for n in shp2])
A = numpy.asarray(numpy.random.rand(*shp1) * shp2[0], dtype='int32')
C = numpy.asarray(numpy.random.rand(*shp2) * shp2[0], dtype='float32')
self._compile_and_check([a, c], # theano.function inputs
[self.op(a, c)], # theano.function outputs
# Always use not square matrix!
# inputs data
[A, C],
# Op that should be removed from the graph.
self.op_class)
# Disabled as it isn't implemented.
def ___test_infer_shape_tuple(self):
a = tensor.tensor3(dtype='int32')
b = tensor.tensor3(dtype='int32')
c = tensor.tensor3(dtype='int32')
A = numpy.asarray([1, 0], dtype='int32').reshape((2, 1, 1))
B = numpy.asarray(numpy.random.rand(1, 4, 1), dtype='int32')
C = numpy.asarray(numpy.random.rand(1, 1, 7), dtype='int32')
f = function([a, b, c], choose(a, (b, c)))
shape = (2, 4, 7)
assert numpy.allclose(f(A, B, C).shape, shape)
self._compile_and_check([a, b, c], # theano.function inputs
[self.op(a, (b, c))], # theano.function outputs
# Always use not square matrix!
# inputs data
[A, B, C],
# Op that should be removed from the graph.
self.op_class)
def test_allocempty():
# Test that we allocated correctly
f = theano.function([], AllocEmpty("float32")(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == 'float32'
def test_symbolic_slice():
x = theano.tensor.tensor4('x')
a, b = x.shape[:2]
output = a.eval({x: numpy.zeros((5, 4, 3, 2), dtype=theano.config.floatX)})
assert output == numpy.array(5)
def test_composite_neg_bool():
# Check that taking the negation of a Boolean intermediate value
# works correctly with Python code. It used to be an issue because
# `-numpy.bool_(True)` is False and `-numpy.bool_(False)` is True.
x = theano.tensor.vector()
f = theano.function([x], - (x > 0), mode=theano.Mode(linker='py'))
utt.assert_allclose(f([-1, 0, 1]), [0, 0, -1])
"""
if __name__ == '__main__':
if 0:
unittest.main()
else:
testcase = FloorInplaceTester
suite = unittest.TestLoader()
suite = suite.loadTestsFromTestCase(testcase)
unittest.TextTestRunner(verbosity=2).run(suite)
"""
| 38.564159 | 274 | 0.544323 |
fa447e2b35fde4753e31b63c558c07e8914c853f | 1,517 | py | Python | oop 1-1.py | johndaguio/OOP---1-1 | ea94f38412aac5f8d0ee99a16bf252af97546c8b | [
"Apache-2.0"
] | null | null | null | oop 1-1.py | johndaguio/OOP---1-1 | ea94f38412aac5f8d0ee99a16bf252af97546c8b | [
"Apache-2.0"
] | null | null | null | oop 1-1.py | johndaguio/OOP---1-1 | ea94f38412aac5f8d0ee99a16bf252af97546c8b | [
"Apache-2.0"
] | null | null | null | from tkinter import *
window = Tk()
window.geometry("600x500+30+20")
window.title("Welcome to Python Programming")
#add Button widget
btn = Button(window, text = "Click to add name", fg="blue")
btn.place(x= 80, y = 100)
#Add label widget
lbl = Label(window, text = "Student Personal Information", fg = "Blue", bg = "orange")
lbl.place(relx=.5, y=50,anchor='center')
lbl2 = Label(window, text = "Gender", fg = "red")
lbl2.place(x =80, y=150)
#Add text field widget
txtfld = Entry(window, bd = 3, font = ("verdana",16))
txtfld.place(x=150, y=100)
#add radio button
v1 = StringVar()
v1.set("Male")
# Both radio buttons share one selection variable; each button carries its own value.
r1 = Radiobutton(window, text="Male", variable=v1, value="Male")
r1.place(x=80, y=200)
r2 = Radiobutton(window, text="Female", variable=v1, value="Female")
r2.place(x=200, y=200)
v3 = IntVar()
v4 = IntVar()
v5 = IntVar()
chkbox = Checkbutton(window,text = "Basketball", variable = v3)
chkbox2 = Checkbutton(window, text = "Tennis", variable = v4)
chkbox3 = Checkbutton(window, text = "Swimming", variable = v5)
chkbox.place(x=80, y=300)
chkbox2.place(x=250, y=300)
chkbox3.place(x=350, y = 300)
lbl3 = Label(window, text = "Sports")
lbl3.place(x=80, y=250)
lbl4 = Label(window, text = "Subjects")
lbl4.place(x = 80, y=350)
var = StringVar()
var.set("Arithmetic")
data1 = "Arithmetic"
data2 = "Reading"
data3 = "Writing"
lstbox = Listbox(window,height = 5, selectmode = 'multiple' )
lstbox.insert(END, data1, data2, data3)
lstbox.place(x = 80,y = 400)
window.mainloop()
| 24.868852 | 87 | 0.651945 |
f33d08632137524aa64828c5ac39b2885b92195f | 93 | py | Python | bmstocker/apps.py | hchockarprasad/bmdjango | a978e4bca264eaa5a1f21df332f7da06f9f69ee5 | [
"MIT"
] | 3 | 2017-10-29T13:37:58.000Z | 2017-11-06T15:31:35.000Z | bmstocker/apps.py | hchockarprasad/bmdjango | a978e4bca264eaa5a1f21df332f7da06f9f69ee5 | [
"MIT"
] | null | null | null | bmstocker/apps.py | hchockarprasad/bmdjango | a978e4bca264eaa5a1f21df332f7da06f9f69ee5 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BmstockerConfig(AppConfig):
name = 'bmstocker'
| 15.5 | 33 | 0.763441 |
ade4dc093d58dece6985550d71244e96ecc0c484 | 1,075 | py | Python | misc/python/setup.py | rsignavong/materialize | 1a3be2b7b73919d59274e45d100592813c186d44 | [
"MIT"
] | 3,840 | 2020-02-13T18:28:21.000Z | 2022-03-31T17:25:04.000Z | misc/python/setup.py | rsignavong/materialize | 1a3be2b7b73919d59274e45d100592813c186d44 | [
"MIT"
] | 5,802 | 2020-02-13T18:59:27.000Z | 2022-03-31T21:50:24.000Z | misc/python/setup.py | morsapaes/materialize | 9f70c024869d681dbd8a2644b6d368b5f7e9707e | [
"MIT"
] | 295 | 2020-02-13T18:49:32.000Z | 2022-03-30T10:55:12.000Z | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from pathlib import Path
from typing import List
from setuptools import find_packages, setup # type: ignore
# stub setup.py that allows running `pip install -e .` to install into a virtualenv
HERE = Path(__file__).parent
def requires(fname: str) -> List[str]:
return [l for l in HERE.joinpath(fname).open().read().splitlines() if l]
setup(
name="materialize",
packages=find_packages(),
install_requires=requires("requirements.txt"),
extras_require={
"dev": requires("requirements-dev.txt"),
},
package_data={
"materialize": ["py.typed"],
"materialize.optbench": ["schema/*.sql", "workload/*.sql"],
},
include_package_data=True,
)
| 29.054054 | 83 | 0.705116 |
c15d972c2dec4b9d93aefecc31bb25dda99bca9f | 507 | py | Python | plotly/validators/layout/scene/xaxis/tickformatstop/_enabled.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/layout/scene/xaxis/tickformatstop/_enabled.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/layout/scene/xaxis/tickformatstop/_enabled.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class EnabledValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='enabled',
parent_name='layout.scene.xaxis.tickformatstop',
**kwargs
):
super(EnabledValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 26.684211 | 70 | 0.615385 |
0692f27bed5be0b33f5b0d2ac143fadf6063eaac | 3,372 | py | Python | metaci/repository/models.py | sfdc-qbranch/MetaCI | 78ac0d2bccd2db381998321ebd71029dd5d9ab39 | [
"BSD-3-Clause"
] | 48 | 2018-10-24T14:52:06.000Z | 2022-03-25T21:14:50.000Z | metaci/repository/models.py | sfdc-qbranch/MetaCI | 78ac0d2bccd2db381998321ebd71029dd5d9ab39 | [
"BSD-3-Clause"
] | 2,034 | 2018-10-31T20:59:16.000Z | 2022-03-22T21:38:03.000Z | metaci/repository/models.py | sfdc-qbranch/MetaCI | 78ac0d2bccd2db381998321ebd71029dd5d9ab39 | [
"BSD-3-Clause"
] | 27 | 2018-12-24T18:16:23.000Z | 2021-12-15T17:57:27.000Z | import github3.exceptions
from cumulusci.core.exceptions import GithubException
from cumulusci.core.github import get_github_api_for_repo
from django.apps import apps
from django.db import models
from django.http import Http404
from django.urls import reverse
from model_utils.managers import SoftDeletableManager
from model_utils.models import SoftDeletableModel
from metaci.cumulusci.keychain import GitHubSettingsKeychain
class RepositoryQuerySet(models.QuerySet):
def for_user(self, user, perms=None):
if user.is_superuser:
return self
if perms is None:
perms = "plan.view_builds"
PlanRepository = apps.get_model("plan.PlanRepository")
return self.filter(
planrepository__in=PlanRepository.objects.for_user(user, perms)
).distinct()
def get_for_user_or_404(self, user, query, perms=None):
try:
return self.for_user(user, perms).get(**query)
except Repository.DoesNotExist:
raise Http404
class Repository(models.Model):
name = models.CharField(max_length=255)
owner = models.CharField(max_length=255)
github_id = models.IntegerField(null=True, blank=True)
url = models.URLField(max_length=255)
release_tag_regex = models.CharField(max_length=255, blank=True, null=True)
default_implementation_steps = models.JSONField(null=True, blank=True, default=list)
metadata = models.JSONField(null=True, blank=True, default=dict)
objects = RepositoryQuerySet.as_manager()
class Meta:
ordering = ["name", "owner"]
verbose_name_plural = "repositories"
def get_absolute_url(self):
return reverse("repo_detail", kwargs={"owner": self.owner, "name": self.name})
def __str__(self):
return f"{self.owner}/{self.name}"
def get_github_api(self):
gh = get_github_api_for_repo(GitHubSettingsKeychain(), self.owner, self.name)
repo = gh.repository(self.owner, self.name)
return repo
@property
def latest_release(self):
try:
return self.releases.latest()
except Repository.DoesNotExist:
return None
class BranchManager(SoftDeletableManager):
def get_queryset(self):
return super().get_queryset().select_related("repo")
class Branch(SoftDeletableModel):
name = models.CharField(max_length=255)
repo = models.ForeignKey(
Repository, related_name="branches", on_delete=models.CASCADE
)
objects = BranchManager()
include_deleted = models.QuerySet.as_manager()
class Meta:
ordering = ["repo__name", "repo__owner", "name"]
verbose_name_plural = "branches"
def get_absolute_url(self):
return reverse(
"branch_detail",
kwargs={
"owner": self.repo.owner,
"name": self.repo.name,
"branch": self.name,
},
)
def __str__(self):
return f"{self.repo.name}: {self.name}"
def is_tag(self):
"""Returns True if this branch is related to a tag in GitHub"""
return self.name.startswith("tag: ")
def get_github_api(self):
try:
branch = self.repo.get_github_api().branch(self.name)
except (github3.exceptions.NotFoundError, GithubException):
branch = None
return branch
| 31.811321 | 88 | 0.668743 |
77f574bc3dc735abe684c9910e4b6ccda9523230 | 1,850 | py | Python | mvp/mvp/views/main_view.py | 2110521-2563-1-Software-Architecture/TBD-Assignment-3 | d78e849a50c6367e1e01c1271753301d3d8e4dd8 | [
"MIT"
] | null | null | null | mvp/mvp/views/main_view.py | 2110521-2563-1-Software-Architecture/TBD-Assignment-3 | d78e849a50c6367e1e01c1271753301d3d8e4dd8 | [
"MIT"
] | null | null | null | mvp/mvp/views/main_view.py | 2110521-2563-1-Software-Architecture/TBD-Assignment-3 | d78e849a50c6367e1e01c1271753301d3d8e4dd8 | [
"MIT"
] | null | null | null | import wx
from typing import List
from mvp.contracts.main_contract import MainContract
from mvp.models.entities.note import Note
class MainView(MainContract.View):
def __init__(self):
MainContract.View.__init__(self, "MVP Note Application")
def init_ui(self):
panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
new_note_label = wx.StaticText(panel, label="New Note:")
note_input = wx.TextCtrl(panel)
add_note_button = wx.Button(panel, label="Add Note")
clear_all_button = wx.Button(panel, label="Clear All")
note_list_label = wx.StaticText(panel, label="Note List:")
vbox.Add(new_note_label)
vbox.AddSpacer(8)
vbox.Add(note_input, 0, wx.EXPAND)
vbox.AddSpacer(8)
vbox.Add(add_note_button, 0, wx.EXPAND)
vbox.AddSpacer(8)
vbox.Add(clear_all_button, 0, wx.EXPAND)
vbox.AddSpacer(8)
vbox.Add(note_list_label)
add_note_button.Bind(wx.EVT_BUTTON, self.on_add_note_button_clicked)
clear_all_button.Bind(wx.EVT_BUTTON, self.on_clear_all_button_clicked)
panel.SetSizer(vbox)
self.note_list_label = note_list_label
self.note_input = note_input
if self.presenter:
self.presenter.get_all_notes()
def update_view(self, items: List[Note]):
self.note_list_label.SetLabel(
"Note List:\n" + "\n".join([f"{i + 1}. {note.content}" for i, note in enumerate(items)]))
def on_clear_all_button_clicked(self, e):
# Clear all notes
# Your code here
self.presenter.clear_all()
pass
def on_add_note_button_clicked(self, e):
content = self.note_input.GetValue()
self.note_input.SetValue("")
# Add note
# Your code here
self.presenter.add_note(content)
| 31.355932 | 101 | 0.651351 |
356b0230101a01dba88c99eaee5135d4b3aaa1e1 | 1,554 | py | Python | samples/justcount.py | rodrigoacastro/seacow | 17b89951bbb8d7f765d9cdbd330ef70e4bfcc2fa | [
"BSD-2-Clause"
] | null | null | null | samples/justcount.py | rodrigoacastro/seacow | 17b89951bbb8d7f765d9cdbd330ef70e4bfcc2fa | [
"BSD-2-Clause"
] | null | null | null | samples/justcount.py | rodrigoacastro/seacow | 17b89951bbb8d7f765d9cdbd330ef70e4bfcc2fa | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from SeaCOW import Query, Nonprocessor
# Create a Query object and set whatever needs to be set.
q = Query()
q.corpus = 'decow16a-nano' # Lower-case name of the corpus to use.
q.string = '[word="Chuzpe"]' # A normal CQL string as used in NoSketchEngine.
q.max_hits = -1 # Maximal number of hits to return. Ignored for Nonprocessor.
q.attributes = [] # For counting, you don't need word attributes.
q.structures = [] # ... you don't need structural attributes.
q.references = [] # ... you don't need reference attrs.
q.container = None # Which container structure should be used? None is OK
# only if class is Nonprocessor.
# Using the deduplicator would NOT change the outcome. Switch off.
q.set_deduplication(off = True)
# Create a Processor object and attach it to the Query object.
# The Nonprocessor processor does nothing. You can work with the results
# yourself in the finalise method or just get the hits value from the
# query object. It is the concordance as reported by Manatee.
p = Nonprocessor() # Create a processor object of appropriate type.
q.processor = p # Attach the processor to the query.
q.run() # Run the query.
print('Query was: %s' % (q.string))
print('Corpus used: %s' % (q.corpus))
print('Query returned %d hits.' % (q.hits))
| 51.8 | 105 | 0.597812 |
05ee83a43ca22e64bab376d5cc1e218397af96b7 | 32,722 | py | Python | ion_networks/ms_utils.py | swillems/ion_networks | 5304a92248ec007ac2253f246a3d44bdb58ae110 | [
"MIT"
] | 2 | 2020-10-28T16:11:56.000Z | 2020-12-03T13:19:18.000Z | ion_networks/ms_utils.py | swillems/ion_networks | 5304a92248ec007ac2253f246a3d44bdb58ae110 | [
"MIT"
] | null | null | null | ion_networks/ms_utils.py | swillems/ion_networks | 5304a92248ec007ac2253f246a3d44bdb58ae110 | [
"MIT"
] | null | null | null | #!python
# builtin
import os
import sys
import logging
import json
import time
import contextlib
import multiprocessing
import urllib
import csv
# external
import numpy as np
import pandas as pd
import h5py
import pyteomics.mgf
# local
from ion_networks._version import __version__ as VERSION
from ion_networks import numba_functions
GITHUB_VERSION_FILE = "https://raw.githubusercontent.com/swillems/ion_networks/master/ion_networks/_version.py"
BASE_PATH = os.path.dirname(__file__)
UPDATE_COMMAND = os.path.join(os.path.dirname(BASE_PATH), "install", "update.sh")
LIB_PATH = os.path.join(BASE_PATH, "lib")
DEFAULT_PARAMETER_PATH = os.path.join(LIB_PATH, "default_parameters")
DEFAULT_PARAMETER_FILES = {
"convert": "convert_parameters.json",
"create": "create_parameters.json",
"evidence": "evidence_parameters.json",
"interface": "interface_parameters.json",
"database": "database_parameters.json",
"annotation": "annotation_parameters.json",
"mgf": "mgf_parameters.json",
}
DATA_TYPE_FILE_EXTENSIONS = {
"DDA": ".mgf",
"SONAR": "_Apex3DIons.csv",
"HDMSE": "_Apex3DIons.csv",
"SWIMDIA": "_Apex3DIons.csv",
"DIAPASEF": "_centroids.hdf",
}
LOGGER = logging.getLogger("Ion-networks")
MAX_THREADS = 1
@contextlib.contextmanager
def open_logger(log_file_name, log_level=logging.INFO):
# TODO: Docstring
start_time = time.time()
formatter = logging.Formatter('%(asctime)s > %(message)s')
LOGGER.setLevel(log_level)
if not LOGGER.hasHandlers():
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(log_level)
console_handler.setFormatter(formatter)
LOGGER.addHandler(console_handler)
if log_file_name is not None:
if log_file_name == "":
log_file_name = BASE_PATH
else:
log_file_name = os.path.abspath(log_file_name)
if os.path.isdir(log_file_name):
log_file_name = os.path.join(log_file_name, "log.txt")
directory = os.path.dirname(log_file_name)
if not os.path.exists(directory):
os.makedirs(directory)
file_handler = logging.FileHandler(log_file_name, mode="a")
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.info("=" * 50)
LOGGER.info(f"COMMAND: ion_networks {' '.join(sys.argv[1:])}")
LOGGER.info(f"VERSION: {VERSION}")
LOGGER.info(f"LOGFILE: {log_file_name}")
LOGGER.info("")
try:
yield LOGGER
LOGGER.info("")
LOGGER.info("Successfully finished execution")
except:
LOGGER.info("")
LOGGER.exception("Something went wrong, execution incomplete!")
finally:
LOGGER.info(f"Time taken: {time.time() - start_time}")
LOGGER.info("=" * 50)
if log_file_name is not None:
LOGGER.removeHandler(file_handler)
def read_parameters_from_json_file(file_name="", default=""):
"""
Read a custom or default parameter file.
Parameters
----------
default : str
The default parameters that should be loaded. Options are:
"create"
"evidence"
"interface"
""
file_name : str
The name of a .json file that contains parameters defined by the user.
These will override the default parameters.
Returns
-------
dict
A dictionary with parameters.
"""
if default == "":
parameters = {"log_file_name": ""}
else:
default_parameter_file_name = os.path.join(
DEFAULT_PARAMETER_PATH,
DEFAULT_PARAMETER_FILES[default]
)
with open(default_parameter_file_name, "r") as in_file:
parameters = json.load(in_file)
if file_name != "":
with open(file_name, "r") as in_file:
user_defined_parameters = json.load(in_file)
parameters.update(user_defined_parameters)
# TODO: Numba expects proper floats or integers, not a mixture
# TODO: e.g. DT_error = 2.0, instead of DT_error = 2
if "threads" in parameters:
set_threads(parameters["threads"])
return parameters
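# Illustrative usage (not part of the original module; the file name below is a
# hypothetical example): load the default "create" parameters shipped in
# lib/default_parameters and override them with user-supplied values.
#
#     parameters = read_parameters_from_json_file(
#         file_name="my_create_parameters.json",
#         default="create",
#     )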
def set_threads(threads):
global MAX_THREADS
max_cpu_count = multiprocessing.cpu_count()
if threads > max_cpu_count:
MAX_THREADS = max_cpu_count
else:
while threads <= 0:
threads += max_cpu_count
MAX_THREADS = threads
def get_file_names_with_extension(input_path, extension=""):
"""
Get all file names with a specific extension from a list of files and
folders.
Parameters
----------
input_path : iterable[str]
An iterable with files or folders from which all files with a specific
extension need to be selected.
extension : str
The extension of the files of interest.
Returns
-------
list
A sorted list with unique file names with the specific extension.
"""
input_files = set()
if not isinstance(extension, str):
for tmp_extension in extension:
for file_name in get_file_names_with_extension(
input_path,
tmp_extension
):
input_files.add(file_name)
else:
for current_path in input_path:
if os.path.isfile(current_path):
if current_path.endswith(extension):
input_files.add(current_path)
elif os.path.isdir(current_path):
for current_file_name in os.listdir(current_path):
if current_file_name.endswith(extension):
file_name = os.path.join(
current_path,
current_file_name
)
input_files.add(file_name)
return sorted([os.path.abspath(file_name) for file_name in input_files])
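# Illustrative usage (not part of the original module; the paths are hypothetical):
# collect all .mgf files from a mix of explicit files and folders. `extension` may
# also be an iterable of extensions, in which case the union of matches is returned.
#
#     mgf_files = get_file_names_with_extension(["run_01.mgf", "raw_data_folder"], ".mgf")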
def read_data_from_file(
data_type,
file_name,
log_transform_intensity=True,
):
"""
Convert an [input_file.*] file to a pd.DataFrame with as columns the
dimensions associated with the data type.
Parameters
----------
data_type : str
The data type of the [input_file.*] file. Options are:
'DDA'
'SONAR'
'HDMSE'
'SWIMDIA'
'DIAPASEF'
file_name : str
The file name containing centroided ions.
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
if data_type == "DDA":
read_function = read_data_from_mgf_file
elif data_type == "SONAR":
read_function = read_data_from_sonar_file
elif data_type == "HDMSE":
read_function = read_data_from_hdmse_file
elif data_type == "SWIMDIA":
read_function = read_data_from_swimdia_file
elif data_type == "DIAPASEF":
read_function = read_data_from_diapasef_file
data = read_function(
file_name,
log_transform_intensity=log_transform_intensity,
)
return data
def read_data_from_mgf_file(
file_name,
log_transform_intensity=True,
):
"""
Convert an [mgf_input.mgf] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the DDA .mgf file (generated with ms-convert).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading mgf file {file_name}")
mz1s = []
mz2s = []
rts = []
ints = []
for spectrum in pyteomics.mgf.read(file_name):
peak_count = len(spectrum["intensity array"])
ints.append(spectrum["intensity array"])
mz2s.append(spectrum["m/z array"])
rts.append(
np.repeat(spectrum["params"]["rtinseconds"] / 60, peak_count)
)
mz1s.append(np.repeat(spectrum["params"]["pepmass"][0], peak_count))
mz1s = np.concatenate(mz1s)
mz2s = np.concatenate(mz2s)
rts = np.concatenate(rts)
ints = np.concatenate(ints)
if log_transform_intensity:
ints = np.log2(ints)
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_MZ"
]
data = np.stack([mz2s, rts, ints, mz1s]).T
return pd.DataFrame(data, columns=dimensions)
def read_data_from_sonar_file(
file_name,
log_transform_intensity=True,
):
"""
Convert a [sonar_input.csv] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the SONAR .csv file (generated with Waters' Apex3d).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading sonar file {file_name}")
data = pd.read_csv(
file_name,
engine="c",
dtype=np.float,
usecols=["Function", "m_z", "rt", "mobility", "area"]
).values
data = data[np.searchsorted(data[:, 0], 2):, 1:]
if log_transform_intensity:
data[:, 2] = np.log2(data[:, 2])
data[:, 3] = 400 + data[:, 3] * (900 - 400) / 200
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_MZ"
]
return pd.DataFrame(data, columns=dimensions)
def read_data_from_hdmse_file(
file_name,
log_transform_intensity=True,
):
"""
Convert a [hdmse_input.csv] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_DT, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the HDMSE .csv file (generated with Waters' Apex3d).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_DT,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading hdmse file {file_name}")
data = pd.read_csv(
file_name,
engine="c",
dtype=np.float,
usecols=["Function", "m_z", "rt", "mobility", "area"]
).values
data = data[np.searchsorted(data[:, 0], 2):, 1:]
if log_transform_intensity:
data[:, 2] = np.log2(data[:, 2])
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_DT"
]
return pd.DataFrame(data, columns=dimensions)
def read_data_from_swimdia_file(
file_name,
log_transform_intensity=True,
):
"""
Convert a [swimdia_input.csv] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_DT, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the SWIM-DIA .csv file (generated with Waters' Apex3d).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_DT,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading swimdia dile {file_name}")
data = pd.read_csv(
file_name,
engine="c",
dtype=np.float,
usecols=["m_z", "rt", "mobility", "area"]
).values
if log_transform_intensity:
data[:, 2] = np.log2(data[:, 2])
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_DT"
]
return pd.DataFrame(data, columns=dimensions)
def read_data_from_diapasef_file(
file_name,
min_intensity=1000,
min_cluster_size=10,
log_transform_intensity=True,
):
"""
Convert a [diapasef_input_centroids.hdf] file to a pd.DataFrame with as
columns the PRECURSOR_RT, PRECURSOR_DT, FRAGMENT_MZ and FRAGMENT_LOGINT
dimensions.
Parameters
----------
file_name : str
The file name of the DIAPASEF _centroids.hdf file (generated with
diapasef.py).
min_intensity : float
        The minimum intensity of an ion to retain it.
min_cluster_size : int
        The minimum cluster size of an ion to retain it.
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_DT,
PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading diapasef file {file_name}")
with h5py.File(file_name, "r") as hdf_file:
centroided_fragment_mzs = hdf_file["fragment_mz_values"][...]
centroided_fragment_intensities = hdf_file[
"fragment_intensity_values"
][...]
centroided_precursor_mzs = hdf_file["precursor_mz_values"][...]
centroided_precursor_dts = hdf_file["precursor_dt_values"][...]
centroided_precursor_rts = hdf_file["precursor_rt_values"][...]
cluster_sizes = hdf_file["cluster_sizes"][...]
selection = (cluster_sizes > min_cluster_size)
if min_intensity > 0:
selection &= (centroided_fragment_intensities > min_intensity)
selection = np.flatnonzero(selection)
if log_transform_intensity:
centroided_fragment_intensities = np.log2(
centroided_fragment_intensities
)
return pd.DataFrame(
np.stack(
[
centroided_fragment_mzs[selection],
centroided_fragment_intensities[selection],
centroided_precursor_mzs[selection],
centroided_precursor_dts[selection],
centroided_precursor_rts[selection] / 60,
]
).T,
columns=[
"FRAGMENT_MZ",
"FRAGMENT_LOGINT",
"PRECURSOR_MZ",
"PRECURSOR_DT",
"PRECURSOR_RT",
]
)
def read_centroided_csv_file(
centroided_csv_file_name,
parameters,
):
"""
Read a centroided .csv file and return this as a pd.DataFrame.
Parameters
----------
centroided_csv_file_name : str
The name of a .csv file with centroided ion peaks.
parameters : dict
A dictionary with optional parameters for the creation of an
ion-network.
Returns
-------
pd.Dataframe
A pd.Dataframe with centroided ion peaks.
Raises
-------
KeyError
If the PRECURSOR_RT, FRAGMENT_MZ or FRAGMENT_LOGINT column is
missing.
"""
LOGGER.info(f"Reading centroided csv file {centroided_csv_file_name}")
data = pd.read_csv(
centroided_csv_file_name,
engine="c",
)
if "PRECURSOR_RT" not in data:
raise KeyError("No PRECURSOR_RT column present")
if "FRAGMENT_MZ" not in data:
raise KeyError("No FRAGMENT_MZ column present")
if "FRAGMENT_LOGINT" not in data:
raise KeyError("No FRAGMENT_LOGINT column present")
data.sort_values(
by=["PRECURSOR_RT", "FRAGMENT_MZ"],
inplace=True
)
return data
def write_data_to_csv_file(
data,
out_file_name,
):
"""
Save a pandas dataframe with ion coordinates to a file.
Parameters
----------
data : pd.DataFrame
A pd.DataFrame with as columns the selection / separation dimensions.
out_file_name : str
The file name of the .csv file in which to save the data.
"""
LOGGER.info(f"Writing to centroided csv file {out_file_name}")
data.to_csv(out_file_name, index=False)
def get_github_version():
# TODO: Docstring
try:
with urllib.request.urlopen(GITHUB_VERSION_FILE) as version_file:
for line in version_file.read().decode('utf-8').split("\n"):
if line.startswith("__version__"):
github_version = line.split("\"")[1]
return github_version
else:
return None
except IndexError:
return None
except urllib.error.URLError:
return None
def verify_version():
github_version = get_github_version()
if github_version is None:
return (
f'{"*" * 50}\n'
f'{"Failed to check if version update is possible"}\n'
f'{"*" * 50}\n'
)
elif github_version != VERSION:
return (
f'{("*" * 50)}\n'
f"Github is at version {github_version}, "
f"while local version is {VERSION}\n"
f'{("Update by reinstalling or running the following command:")}\n'
f"bash '{UPDATE_COMMAND}'\n"
f'{("*" * 50)}\n'
)
else:
return ""
def annotate_mgf(
mgf_file_name,
database,
out_file_name,
parameters,
):
threads = MAX_THREADS
LOGGER.info(f"Reading spectra of {mgf_file_name}")
spectra = [spectrum for spectrum in pyteomics.mgf.read(mgf_file_name)]
spectra_indptr = np.empty(len(spectra) + 1, np.int64)
spectra_indptr[0] = 0
spectra_indptr[1:] = np.cumsum(
[len(spectrum["m/z array"]) for spectrum in spectra]
)
spectra_mzs = np.concatenate(
[spectrum["m/z array"] for spectrum in spectra]
)
if parameters["align_to_database"]:
LOGGER.info(f"Aligning {mgf_file_name} to {database.file_name}")
spectra_mzs_ = database.align_mz_values(
spectra_mzs,
np.repeat(
[
spectrum['params']['rtinseconds'] for spectrum in spectra
],
np.diff(spectra_indptr)
) / 60
)
else:
spectra_mzs_ = spectra_mzs
mz_order = np.argsort(spectra_mzs_)
spectra_log_mzs = np.log(spectra_mzs_[mz_order]) * 10**6
LOGGER.info(f"Reading database {database.file_name}")
peptide_pointers = database.get_fragment_coordinates("peptide_index")
database_log_mzs = np.log(database.get_fragment_coordinates("mz")) * 10**6
LOGGER.info(
f"Matching fragments of {mgf_file_name} with {database.file_name}"
)
low_limits = np.searchsorted(
database_log_mzs,
spectra_log_mzs - parameters["annotation_ppm"],
"left"
)
high_limits = np.searchsorted(
database_log_mzs,
spectra_log_mzs + parameters["annotation_ppm"],
"right"
)
inv_order = np.argsort(mz_order)
low_limits = low_limits[inv_order]
high_limits = high_limits[inv_order]
LOGGER.info(
f"Annotating fragments of {mgf_file_name} with {database.file_name}"
)
with multiprocessing.pool.ThreadPool(threads) as p:
results = p.starmap(
numba_functions.annotate_mgf,
[
(
np.arange(i, spectra_indptr.shape[0] - 1, threads),
spectra_indptr,
low_limits,
high_limits,
peptide_pointers,
) for i in range(threads)
]
)
scores = np.concatenate([r[0] for r in results])
fragments = np.concatenate([r[1] for r in results])
ion_indices = np.concatenate([r[2] for r in results])
count_results = np.concatenate([r[3] for r in results])
candidate_counts = np.concatenate([r[4] for r in results])
spectrum_sizes = np.concatenate([r[5] for r in results])
del results
LOGGER.info("Calculating scores")
modified_scores, fdr_values = calculate_modified_score(
scores,
count_results,
spectrum_sizes,
database,
peptide_pointers[fragments]
)
export_annotated_csv(
scores=scores,
fragments=fragments,
ion_indices=ion_indices,
count_results=count_results,
candidate_counts=candidate_counts,
spectrum_sizes=spectrum_sizes,
spectra=spectra,
spectra_indptr=spectra_indptr,
spectra_mzs=spectra_mzs,
database=database,
peptide_pointers=peptide_pointers,
out_file_name=out_file_name,
export_decoys=parameters['export_decoys'],
fdr_filter=parameters['fdr_filter'],
fdr_values=fdr_values,
modified_scores=modified_scores,
calibrated_mzs=spectra_mzs_
)
def calculate_modified_score(
likelihoods,
hit_counts,
neighbors,
database,
peptide_indices
):
modified_scores = hit_counts.copy()
modified_scores = hit_counts ** likelihoods
modified_scores /= np.log2(1 + neighbors)
sequence_lengths = np.array(
[
len(s) for s in database.read_dataset(
"sequence",
"peptides"
)
]
)[peptide_indices]
modified_scores /= np.log2(sequence_lengths * 2 - 2)
decoys = database.read_dataset(
"decoy",
"peptides"
)[peptide_indices]
order = np.argsort(modified_scores)[::-1]
decoy_count = np.cumsum(decoys[order])
fdr_values = decoy_count / np.arange(1, decoy_count.shape[0] + 1)
inv_order = np.argsort(order)
fdr_values = fdr_values[inv_order]
return modified_scores, fdr_values
def export_annotated_csv(
scores,
fragments,
ion_indices,
count_results,
candidate_counts,
spectrum_sizes,
spectra,
spectra_indptr,
spectra_mzs,
database,
peptide_pointers,
# score_cutoff,
out_file_name,
export_decoys,
fdr_filter,
fdr_values,
modified_scores,
calibrated_mzs,
):
LOGGER.info(f"Exporting {out_file_name}")
peptides = peptide_pointers[fragments]
decoys = database.read_dataset("decoy", "peptides")
peptide_modifications = database.read_dataset("modifications", "peptides")
peptide_sequences = database.read_dataset("sequence", "peptides")
peptide_masses = database.read_dataset("mass", "peptides")
# selection = np.flatnonzero((scores < score_cutoff) & (~decoys[peptides]))
fragment_ion_numbers = database.get_fragment_coordinates("ionnumber")
fragment_ion_mz_database = database.get_fragment_coordinates("mz")
fragment_is_y_ion = database.get_fragment_coordinates("y_ion")
self_ints = np.concatenate(
[spectrum["intensity array"] for spectrum in spectra]
)
spectrum_indices1 = np.searchsorted(
spectra_indptr,
ion_indices,
"right"
) - 1
with open(out_file_name, "w", newline='') as raw_outfile:
outfile = csv.writer(raw_outfile)
header = [
"Fragment_index",
"Fragment_mz",
"Fragment_mz_calibrated",
"Fragment_int",
"Spectrum_title",
"Spectrum_pepmass",
"Spectrum_rtinseconds",
"Database_index",
"Database_mz",
"Ion_type",
"Ion_number",
"Peptide_sequence",
"Peptide_mods",
"Peptide_length",
"Peptide_mass",
"Likelihood",
"Count",
"Candidates",
"Spectrum_size",
"Modified_score",
"Decoy",
"FDR",
]
outfile.writerow(header)
for i, ion_index in enumerate(ion_indices):
fdr = fdr_values[i]
if fdr > fdr_filter:
continue
fragment_index = fragments[i]
peptide_index = peptides[i]
if (not export_decoys):
if decoys[peptide_index]:
continue
spectrum_index = spectrum_indices1[i]
peptide_sequence = peptide_sequences[peptide_index]
peptide_mass = peptide_masses[peptide_index]
row = [
ion_index,
spectra_mzs[ion_index],
calibrated_mzs[ion_index],
self_ints[ion_index],
spectra[spectrum_index]['params']['title'],
spectra[spectrum_index]['params']['pepmass'][0],
spectra[spectrum_index]['params']['rtinseconds'],
fragment_index,
fragment_ion_mz_database[fragment_index],
"Y" if fragment_is_y_ion[fragment_index] else "B",
fragment_ion_numbers[fragment_index],
peptide_sequence,
peptide_modifications[peptide_index],
len(peptide_sequence),
peptide_mass,
scores[i],
count_results[i],
candidate_counts[i],
spectrum_sizes[i],
modified_scores[i],
decoys[peptide_index],
fdr,
]
outfile.writerow(row)
class HDF_File(object):
# TODO: Docstring
@property
def directory(self):
return os.path.dirname(self.file_name)
@property
def file_name(self):
return self.__file_name
@property
def original_file_name(self):
return self.read_attr("original_file_name")
@property
def creation_time(self):
return self.read_attr("creation_time")
@property
def last_updated(self):
return self.read_attr("last_updated")
@property
def version(self):
try:
return self.read_attr("version")
except KeyError:
return "0.0.0"
@property
def is_read_only(self):
return self.__is_read_only
def __init__(
self,
file_name,
is_read_only=True,
new_file=False,
):
# TODO: Docstring
self.__file_name = os.path.abspath(file_name)
if not isinstance(new_file, bool):
raise ValueError(
f"Could not determine if HDF_file {self.file_name} is read, "
f"write or truncate."
)
if new_file:
is_read_only = False
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with h5py.File(self.file_name, "w") as hdf_file:
hdf_file.attrs["creation_time"] = time.asctime()
hdf_file.attrs["version"] = VERSION
hdf_file.attrs["original_file_name"] = self.__file_name
self.__update_timestamp(hdf_file)
else:
with h5py.File(self.file_name, "r") as hdf_file:
self.check_version()
self.__is_read_only = is_read_only
def check_version(self):
if self.version != VERSION:
LOGGER.warning(
f"WARNING: {self.file_name} was created with version "
f"{self.version} instead of {VERSION}."
)
def __eq__(self, other):
return self.file_name == other.file_name
def __hash__(self):
return hash(self.file_name)
def __str__(self):
return f"<{self.file_name}>"
def __repr__(self):
return str(self)
def __update_timestamp(self, hdf_file):
hdf_file.attrs["last_updated"] = time.asctime()
def __get_parent_group(self, hdf_file, parent_group_name):
if parent_group_name == "":
parent_group = hdf_file
else:
parent_group = hdf_file[parent_group_name]
return parent_group
def read_group(self, parent_group_name=""):
# TODO: Docstring
with h5py.File(self.file_name, "r") as hdf_file:
parent_group = self.__get_parent_group(hdf_file, parent_group_name)
group = sorted(parent_group)
return group
def read_attr(self, attr_key=None, parent_group_name=""):
# TODO: Docstring
with h5py.File(self.file_name, "r") as hdf_file:
parent_group = self.__get_parent_group(hdf_file, parent_group_name)
if attr_key is not None:
attr = parent_group.attrs[attr_key]
else:
attr = sorted(parent_group.attrs)
return attr
def read_dataset(
self,
dataset_name,
parent_group_name="",
indices=Ellipsis,
return_length=False,
return_dtype=False,
):
# TODO: Docstring
try:
iter(indices)
except TypeError:
fancy_indices = False
else:
fancy_indices = True
with h5py.File(self.file_name, "r") as hdf_file:
parent_group = self.__get_parent_group(hdf_file, parent_group_name)
array = parent_group[dataset_name]
if return_length:
return len(parent_group[dataset_name])
if return_dtype:
return len(parent_group[dataset_name].dtype)
if fancy_indices:
array = array[...]
return array[indices]
def write_group(self, group_name, parent_group_name="", overwrite=False):
# TODO: Docstring
if self.is_read_only:
raise IOError(f"HDF {self.file_name} file is opened as read only")
with h5py.File(self.file_name, "a") as hdf_file:
parent_group = self.__get_parent_group(hdf_file, parent_group_name)
if group_name not in parent_group:
hdf_group = parent_group.create_group(group_name)
elif overwrite:
del parent_group[group_name]
hdf_group = parent_group.create_group(group_name)
else:
return
hdf_group.attrs["creation_time"] = time.asctime()
self.__update_timestamp(hdf_file)
def write_attr(self, attr_key, attr_value, parent_group_name=""):
# TODO: Docstring
if self.is_read_only:
raise IOError(f"HDF {self.file_name} file is opened as read only")
with h5py.File(self.file_name, "a") as hdf_file:
parent_group = self.__get_parent_group(hdf_file, parent_group_name)
if isinstance(attr_value, str):
parent_group.attrs[attr_key] = attr_value
else:
try:
iter(attr_value)
except TypeError:
parent_group.attrs[attr_key] = attr_value
else:
parent_group.attrs[attr_key] = str(attr_value)
self.__update_timestamp(hdf_file)
def write_dataset(
self,
dataset_name,
dataset,
parent_group_name="",
overwrite=True,
# compression="lzf" # Fails for windows with pyinstaller for some reason
compression=None,
):
# TODO: Docstring
if self.is_read_only:
raise IOError(f"HDF {self.file_name} file is opened as read only")
if isinstance(dataset, pd.core.frame.DataFrame):
self.write_group(dataset_name, parent_group_name, overwrite)
for column in dataset.columns:
self.write_dataset(
column,
dataset[column].values,
dataset_name,
overwrite,
compression
)
else:
with h5py.File(self.file_name, "a") as hdf_file:
parent_group = self.__get_parent_group(
hdf_file,
parent_group_name
)
if overwrite and (dataset_name in parent_group):
del parent_group[dataset_name]
if dataset_name not in parent_group:
if dataset.dtype.type == np.str_:
dataset = dataset.astype(np.dtype('O'))
if dataset.dtype == np.dtype('O'):
hdf_dataset = parent_group.create_dataset(
dataset_name,
data=dataset,
compression=compression,
dtype=h5py.string_dtype()
)
else:
hdf_dataset = parent_group.create_dataset(
dataset_name,
data=dataset,
compression=compression,
)
hdf_dataset.attrs["creation_time"] = time.asctime()
self.__update_timestamp(hdf_file)
| 32.080392 | 111 | 0.606503 |
30450f2ad0e5ebd695a8166de46746c3932be80c | 4,102 | py | Python | python/simple-linked-list/simple_linked_list_test.py | tamireinhorn/exercism | 3ca78b262ad590b67c75c5d1cd83db02bc2d1e6e | [
"MIT"
] | null | null | null | python/simple-linked-list/simple_linked_list_test.py | tamireinhorn/exercism | 3ca78b262ad590b67c75c5d1cd83db02bc2d1e6e | [
"MIT"
] | 2 | 2021-12-18T16:31:51.000Z | 2021-12-18T16:33:33.000Z | python/simple-linked-list/simple_linked_list_test.py | tamireinhorn/Exercism | 3a3d5744e88ab4457df4e6ac20d772d8c50c43da | [
"MIT"
] | null | null | null | import unittest
from simple_linked_list import LinkedList, EmptyListException
# No canonical data available for this exercise
class SimpleLinkedListTest(unittest.TestCase):
def test_empty_list_has_len_zero(self):
sut = LinkedList()
self.assertEqual(len(sut), 0)
def test_singleton_list_has_len_one(self):
sut = LinkedList([1])
self.assertEqual(len(sut), 1)
def test_non_empty_list_has_correct_len(self):
sut = LinkedList([1, 2, 3])
self.assertEqual(len(sut), 3)
def test_error_on_empty_list_head(self):
sut = LinkedList()
with self.assertRaises(EmptyListException) as err:
sut.head()
self.assertEqual(type(err.exception), EmptyListException)
self.assertEqual(err.exception.args[0], "The list is empty.")
def test_singleton_list_has_head(self):
sut = LinkedList([1])
self.assertEqual(sut.head().value(), 1)
def test_non_empty_list_has_correct_head(self):
sut = LinkedList([1, 2])
self.assertEqual(sut.head().value(), 2)
def test_can_push_to_non_empty_list(self):
sut = LinkedList([1, 2, 3])
sut.push(4)
self.assertEqual(len(sut), 4)
def test_pushing_to_empty_list_changes_head(self):
sut = LinkedList()
sut.push(5)
self.assertEqual(len(sut), 1)
self.assertEqual(sut.head().value(), 5)
def test_can_pop_from_non_empty_list(self):
sut = LinkedList([3, 4, 5])
self.assertEqual(sut.pop(), 5)
self.assertEqual(len(sut), 2)
self.assertEqual(sut.head().value(), 4)
def test_pop_from_singleton_list_removes_head(self):
sut = LinkedList([1])
self.assertEqual(sut.pop(), 1)
with self.assertRaises(EmptyListException) as err:
sut.head()
self.assertEqual(type(err.exception), EmptyListException)
self.assertEqual(err.exception.args[0], "The list is empty.")
def test_error_on_empty_list_pop(self):
sut = LinkedList()
with self.assertRaises(EmptyListException) as err:
sut.pop()
self.assertEqual(type(err.exception), EmptyListException)
self.assertEqual(err.exception.args[0], "The list is empty.")
def test_push_and_pop(self):
sut = LinkedList([1, 2])
sut.push(3)
self.assertEqual(len(sut), 3)
self.assertEqual(sut.pop(), 3)
self.assertEqual(sut.pop(), 2)
self.assertEqual(sut.pop(), 1)
self.assertEqual(len(sut), 0)
sut.push(4)
self.assertEqual(len(sut), 1)
self.assertEqual(sut.head().value(), 4)
def test_singleton_list_head_has_no_next(self):
sut = LinkedList([1])
self.assertIsNone(sut.head().next())
def test_non_empty_list_traverse(self):
sut = LinkedList(range(10))
current = sut.head()
for i in range(10):
self.assertEqual(current.value(), 9 - i)
current = current.next()
self.assertIsNone(current)
def test_empty_linked_list_to_list_is_empty(self):
sut = LinkedList()
self.assertEqual(list(sut), [])
def test_singleton_linked_list_to_list_list_with_singular_element(self):
# For some reason, this is calling next TWICE, and therefore runs into an error. Why?
sut = LinkedList([1])
self.assertEqual(list(sut), [1])
def test_non_empty_linked_list_to_list_is_list_with_all_elements(self):
sut = LinkedList([1, 2, 3])
self.assertEqual(list(sut), [3, 2, 1])
def test_reversed_empty_list_is_empty_list(self):
sut = LinkedList([])
self.assertEqual(list(sut.reversed()), [])
def test_reversed_singleton_list_is_same_list(self):
sut = LinkedList([1])
self.assertEqual(list(sut.reversed()), [1])
def test_reverse_non_empty_list(self):
sut = LinkedList([1, 2, 3])
self.assertEqual(list(sut.reversed()), [1, 2, 3])
def test_even_bigger_guy(self):
sut = LinkedList([1,2,3,4])
self.assertEqual(list(sut.reversed()), [1,2,3,4])
| 34.183333 | 93 | 0.641638 |
5811021a3bf72605de259a0722fc305d8c0e5a8f | 734 | py | Python | retirement/migrations/0010_auto_20190515_1732.py | Jerome-Celle/Blitz-API | 7dfb7b837ed47b11afcfaa5f5aee831c1aa4e5e0 | [
"MIT"
] | 3 | 2019-10-22T00:16:49.000Z | 2021-07-15T07:44:43.000Z | retirement/migrations/0010_auto_20190515_1732.py | Jerome-Celle/Blitz-API | 7dfb7b837ed47b11afcfaa5f5aee831c1aa4e5e0 | [
"MIT"
] | 1,183 | 2018-04-19T18:40:30.000Z | 2022-03-31T21:05:05.000Z | retirement/migrations/0010_auto_20190515_1732.py | Jerome-Celle/Blitz-API | 7dfb7b837ed47b11afcfaa5f5aee831c1aa4e5e0 | [
"MIT"
] | 12 | 2018-04-17T19:16:42.000Z | 2022-01-27T00:19:59.000Z | # Generated by Django 2.0.8 on 2019-05-15 21:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('retirement', '0009_reservation_orderline_allow_null'),
]
operations = [
migrations.AddField(
model_name='historicalretirement',
name='has_shared_rooms',
field=models.BooleanField(default=False),
preserve_default=False,
),
migrations.AddField(
model_name='retirement',
name='has_shared_rooms',
field=models.BooleanField(default=False),
preserve_default=False,
),
]
| 26.214286 | 64 | 0.632153 |
efbb1d5ef2574c6c57d58e5ebcebeab836c610eb | 152 | py | Python | Automate the boring stuff/Chapter_6/Regular_Expression.py | maainul/Paython | c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9 | [
"DOC"
] | null | null | null | Automate the boring stuff/Chapter_6/Regular_Expression.py | maainul/Paython | c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9 | [
"DOC"
] | null | null | null | Automate the boring stuff/Chapter_6/Regular_Expression.py | maainul/Paython | c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9 | [
"DOC"
] | null | null | null | import re
phone=re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
mo=phone.search('My phone number is 412-444-9870')
print(mo.group())
"""
OUTPUT:
412-444-9870
"""
| 16.888889 | 50 | 0.651316 |
aee928c1a0bf28567fb0747743e8556660fa9507 | 1,015 | py | Python | abstractBaseUser/abstract_base_user/admin.py | amateur-dev/Django_CustomUser | 6bb7a8676c48d80c0817a164ca801a1008e874dc | [
"BSD-3-Clause"
] | null | null | null | abstractBaseUser/abstract_base_user/admin.py | amateur-dev/Django_CustomUser | 6bb7a8676c48d80c0817a164ca801a1008e874dc | [
"BSD-3-Clause"
] | null | null | null | abstractBaseUser/abstract_base_user/admin.py | amateur-dev/Django_CustomUser | 6bb7a8676c48d80c0817a164ca801a1008e874dc | [
"BSD-3-Clause"
] | null | null | null | # from django.contrib import admin
# Register your models here.
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = ('email', 'is_staff', 'is_active',)
list_filter = ('email', 'is_staff', 'is_active',)
fieldsets = (
(None, {'fields': ('email', 'password', 'f_name', 'l_name', 'condo_name', 'unit_floor', 'unit_unit', 'has_access_to_facility')}),
('Permissions', {'fields': ('is_staff', 'is_active')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2', 'is_staff', 'is_active')}
),
)
search_fields = ('email', 'condo_name')
ordering = ('email', 'condo_name')
admin.site.register(CustomUser, CustomUserAdmin) | 30.757576 | 137 | 0.653202 |
fa916a8d925503a3882ae31069e8c47191115590 | 3,785 | py | Python | tools/gen.py | ikrivosheev/aiohttp | 42ffdfc7bd70aedec57c5f44a6b1fc84ae565625 | [
"Apache-2.0"
] | 1 | 2021-11-11T04:05:06.000Z | 2021-11-11T04:05:06.000Z | tools/gen.py | ikrivosheev/aiohttp | 42ffdfc7bd70aedec57c5f44a6b1fc84ae565625 | [
"Apache-2.0"
] | 199 | 2020-11-01T08:02:46.000Z | 2022-03-31T07:05:31.000Z | tools/gen.py | ikrivosheev/aiohttp | 42ffdfc7bd70aedec57c5f44a6b1fc84ae565625 | [
"Apache-2.0"
] | 1 | 2021-11-11T04:04:59.000Z | 2021-11-11T04:04:59.000Z | #!/usr/bin/env python
import io
import pathlib
from collections import defaultdict
import multidict
ROOT = pathlib.Path.cwd()
while ROOT.parent != ROOT and not (ROOT / ".git").exists():
ROOT = ROOT.parent
def calc_headers(root):
hdrs_file = root / "aiohttp/hdrs.py"
code = compile(hdrs_file.read_text(), str(hdrs_file), "exec")
globs = {}
exec(code, globs)
headers = [val for val in globs.values() if isinstance(val, multidict.istr)]
return sorted(headers)
headers = calc_headers(ROOT)
def factory():
return defaultdict(factory)
TERMINAL = object()
def build(headers):
dct = defaultdict(factory)
for hdr in headers:
d = dct
for ch in hdr:
d = d[ch]
d[TERMINAL] = hdr
return dct
dct = build(headers)
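# Illustrative sketch (not part of the original generator): build() produces a
# nested character trie in which the TERMINAL sentinel stores the finished header
# name. The helper below is never called by this script -- with a toy list of
# plain strings (the real input is the istr list from aiohttp/hdrs.py) the
# resulting structure can be probed like this.
def _example_trie():
    toy = build(["Age", "Allow"])
    assert toy["A"]["g"]["e"][TERMINAL] == "Age"
    assert toy["A"]["l"]["l"]["o"]["w"][TERMINAL] == "Allow"
    return toy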
HEADER = """\
/* The file is autogenerated from aiohttp/hdrs.py
Run ./tools/gen.py to update it after the origin changing. */
#include "_find_header.h"
#define NEXT_CHAR() \\
{ \\
count++; \\
if (count == size) { \\
/* end of search */ \\
return -1; \\
} \\
pchar++; \\
ch = *pchar; \\
last = (count == size -1); \\
} while(0);
int
find_header(const char *str, int size)
{
char *pchar = str;
int last;
char ch;
int count = -1;
pchar--;
"""
BLOCK = """
{label}
NEXT_CHAR();
switch (ch) {{
{cases}
default:
return -1;
}}
"""
CASE = """\
case '{char}':
if (last) {{
return {index};
}}
goto {next};"""
FOOTER = """
{missing}
missing:
/* nothing found */
return -1;
}}
"""
def gen_prefix(prefix, k):
if k == "-":
return prefix + "_"
else:
return prefix + k.upper()
def gen_block(dct, prefix, used_blocks, missing, out):
cases = {}
for k, v in dct.items():
if k is TERMINAL:
continue
next_prefix = gen_prefix(prefix, k)
term = v.get(TERMINAL)
if term is not None:
index = headers.index(term)
else:
index = -1
hi = k.upper()
lo = k.lower()
case = CASE.format(char=hi, index=index, next=next_prefix)
cases[hi] = case
if lo != hi:
case = CASE.format(char=lo, index=index, next=next_prefix)
cases[lo] = case
label = prefix + ":" if prefix else ""
if cases:
block = BLOCK.format(label=label, cases="\n".join(cases.values()))
out.write(block)
else:
missing.add(label)
for k, v in dct.items():
if not isinstance(v, defaultdict):
continue
block_name = gen_prefix(prefix, k)
if block_name in used_blocks:
continue
used_blocks.add(block_name)
gen_block(v, block_name, used_blocks, missing, out)
def gen(dct):
out = io.StringIO()
out.write(HEADER)
missing = set()
gen_block(dct, "", set(), missing, out)
missing_labels = "\n".join(m for m in sorted(missing))
out.write(FOOTER.format(missing=missing_labels))
return out
def gen_headers(headers):
out = io.StringIO()
out.write("# The file is autogenerated from aiohttp/hdrs.py\n")
out.write("# Run ./tools/gen.py to update it after the origin changing.")
out.write("\n\n")
out.write("from . import hdrs\n")
out.write("cdef tuple headers = (\n")
for hdr in headers:
out.write(" hdrs.{},\n".format(hdr.upper().replace("-", "_")))
out.write(")\n")
return out
# print(gen(dct).getvalue())
# print(gen_headers(headers).getvalue())
folder = ROOT / "aiohttp"
with (folder / "_find_header.c").open("w") as f:
f.write(gen(dct).getvalue())
with (folder / "_headers.pxi").open("w") as f:
f.write(gen_headers(headers).getvalue())
| 21.628571 | 80 | 0.567239 |
cef06c01bf66f3405274c3dec0e9c75b0cbe287a | 2,074 | py | Python | task-library/veeam/VeeamGetHierarchyRoots.py | vNugget/blueprints | 17183beebf8bc3da1d9d3ed4b8260dd18fdc1516 | [
"MIT"
] | null | null | null | task-library/veeam/VeeamGetHierarchyRoots.py | vNugget/blueprints | 17183beebf8bc3da1d9d3ed4b8260dd18fdc1516 | [
"MIT"
] | null | null | null | task-library/veeam/VeeamGetHierarchyRoots.py | vNugget/blueprints | 17183beebf8bc3da1d9d3ed4b8260dd18fdc1516 | [
"MIT"
] | null | null | null | # region headers
# * author: igor.zecevic@nutanix.com
# * version: v1.0 - initial version
# * date: 11/03/2020
# task_name: VeeamGetHierarchyRoots
# description: Get the hierarchyRoot UID
# The script retrieves the hierarchyRoots UID
# note: json and urlreq are assumed to be built-ins of the Calm eScript runtime, which is why they are not imported here
# input vars: veeam_session_cookie, vc_server, api_server
# output vars: veeam_hierarchyRoot_uid
# endregion
# region capture Calm variables
veeam_session_cookie = "@@{veeam_session_cookie}@@"
api_server = "@@{veeam_endpoint}@@"
vc_server = "@@{vc_endpoint}@@"
# endregion
# region prepare api call
api_server_port = "9398"
api_server_endpoint = "/api/hierarchyRoots"
method = "GET"
url = "https://{}:{}{}".format(api_server, api_server_port, api_server_endpoint)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-RestSvcSessionId': veeam_session_cookie}
# endregion
# region API call function
def process_request(url, method, headers, payload=None):
if (payload is not None):
payload = json.dumps(payload)
r = urlreq(url, verb=method, params=payload, verify=False, headers=headers)
if r.ok:
print("Request was successful")
print("Status code: {}".format(r.status_code))
else:
print("Request failed")
print('Status code: {}'.format(r.status_code))
print("Headers: {}".format(headers))
print("Payload: {}".format(json.dumps(payload)))
print('Response: {}'.format(json.dumps(json.loads(r.content), indent=4)))
exit(1)
return r
# endregion
# region login
print("Making a {} API call to {}".format(method, url))
resp = process_request(url, method, headers)
# endregion
# pass the hierarchy root uid (veeam_hierarchyroot_uid) so that it may be captured by Calm.
obj_uid = ""
resp_parse = json.loads(resp.content)
for obj in resp_parse['Refs']:
if obj['Name'] == vc_server:
obj_uid = obj['UID']
if obj_uid:
print ("veeam_hierarchyroot_uid={}".format(obj_uid.rsplit(':', 1)[1]))
exit(0)
else:
print("Error: Managed Server "+vc_server+" doesn't is not present ..")
exit(1) | 34.566667 | 120 | 0.66972 |
ddeff27a1f0c4531bd84ecb842b4950d10175b55 | 1,869 | py | Python | watchman/test/async/test_dead_socket.py | 47-studio-org/watchman | c50631dcf9a9e7d27b2bc05cd32649546add836e | [
"MIT"
] | 3 | 2022-02-10T10:48:36.000Z | 2022-02-21T23:18:10.000Z | watchman/test/async/test_dead_socket.py | 47-studio-org/watchman | c50631dcf9a9e7d27b2bc05cd32649546add836e | [
"MIT"
] | null | null | null | watchman/test/async/test_dead_socket.py | 47-studio-org/watchman | c50631dcf9a9e7d27b2bc05cd32649546add836e | [
"MIT"
] | 1 | 2022-02-06T10:29:46.000Z | 2022-02-06T10:29:46.000Z | #!/usr/bin/env python3
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import os
import unittest
import pywatchman_aio
import WatchmanInstance
# Note this does not extend AsyncWatchmanTestCase as it wants to start its
# own Watchman server instances per test.
class TestDeadSocket(unittest.TestCase):
@unittest.skipIf(os.name == "nt", "not supported on windows")
def test_query_dead_socket(self):
async def test_core(wminst):
with await pywatchman_aio.AIOClient.from_socket(
sockname=wminst.getSockPath()
) as client:
wminst.stop()
with self.assertRaises(ConnectionResetError):
await client.query("version")
self._async_runner(test_core)
@unittest.skipIf(os.name == "nt", "not supported on windows")
def test_subscription_dead_socket(self):
async def test_core(wminst):
with await pywatchman_aio.AIOClient.from_socket(
sockname=wminst.getSockPath()
) as client:
root = f"{wminst.base_dir}/work"
os.makedirs(root)
await client.query("watch", root)
await client.query("subscribe", root, "sub", {"expression": ["exists"]})
wminst.stop()
with self.assertRaises(ConnectionResetError):
await client.get_subscription("sub", root)
self._async_runner(test_core)
def _async_runner(self, test_core):
wminst = WatchmanInstance.Instance()
wminst.start()
try:
return asyncio.new_event_loop().run_until_complete(test_core(wminst))
finally:
wminst.stop()
| 34.611111 | 88 | 0.635099 |
4f2601b4cc671dc9d5e402d735f8cd71a8ec14ff | 21,054 | py | Python | libs/models/detectors/scrdet/build_whole_network.py | PauliKarl/RotationDetection | 84bbfe5b1a3ee36e8ad66fd0f36a5ef7b9b0019e | [
"Apache-2.0"
] | 850 | 2020-10-27T08:51:54.000Z | 2022-03-30T15:12:06.000Z | libs/models/detectors/scrdet/build_whole_network.py | PauliKarl/RotationDetection | 84bbfe5b1a3ee36e8ad66fd0f36a5ef7b9b0019e | [
"Apache-2.0"
] | 94 | 2020-12-01T02:18:47.000Z | 2022-03-30T08:14:27.000Z | libs/models/detectors/scrdet/build_whole_network.py | PauliKarl/RotationDetection | 84bbfe5b1a3ee36e8ad66fd0f36a5ef7b9b0019e | [
"Apache-2.0"
] | 149 | 2020-10-29T03:30:32.000Z | 2022-03-29T09:53:23.000Z | # -*-coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from libs.models.detectors.two_stage_base_network import DetectionNetworkBase
from libs.models.losses.losses import Loss
from libs.utils import bbox_transform, nms_rotate
from libs.models.anchor_heads import generate_h_anchors, anchor_utils
from libs.models.samplers.r2cnn.anchor_sampler_r2cnn import AnchorSamplerR2CNN
from libs.models.samplers.r2cnn.proposal_sampler_r2cnn import ProposalSamplerR2CNN
from libs.models.roi_extractors.roi_extractors import RoIExtractor
from libs.models.box_heads.box_head_base import BoxHead
from utils.box_ops import clip_boxes_to_img_boundaries
class DetectionNetworkSCRDet(DetectionNetworkBase):
def __init__(self, cfgs, is_training):
super(DetectionNetworkSCRDet, self).__init__(cfgs, is_training)
self.proposal_sampler_r2cnn = ProposalSamplerR2CNN(cfgs)
self.anchor_sampler_r2cnn = AnchorSamplerR2CNN(cfgs)
self.losses = Loss(cfgs)
self.roi_extractor = RoIExtractor(cfgs)
self.box_head = BoxHead(cfgs)
def rpn(self, inputs):
rpn_conv3x3 = slim.conv2d(inputs, 512, [3, 3],
trainable=self.is_training,
weights_initializer=self.cfgs.INITIALIZER,
activation_fn=tf.nn.relu,
scope='rpn_conv/3x3')
rpn_cls_score = slim.conv2d(rpn_conv3x3, self.num_anchors_per_location * 2, [1, 1], stride=1,
trainable=self.is_training, weights_initializer=self.cfgs.INITIALIZER,
activation_fn=None,
scope='rpn_cls_score')
rpn_box_pred = slim.conv2d(rpn_conv3x3, self.num_anchors_per_location * 4, [1, 1], stride=1,
trainable=self.is_training, weights_initializer=self.cfgs.BBOX_INITIALIZER,
activation_fn=None,
scope='rpn_bbox_pred')
rpn_cls_prob = slim.softmax(rpn_cls_score, scope='rpn_cls_prob')
return rpn_box_pred, rpn_cls_score, rpn_cls_prob
def make_anchors(self, feature_to_cropped):
featuremap_height, featuremap_width = tf.shape(feature_to_cropped)[1], tf.shape(feature_to_cropped)[2]
featuremap_height = tf.cast(featuremap_height, tf.float32)
featuremap_width = tf.cast(featuremap_width, tf.float32)
anchors = anchor_utils.make_anchors(base_anchor_size=self.cfgs.BASE_ANCHOR_SIZE_LIST,
anchor_scales=self.cfgs.ANCHOR_SCALES, anchor_ratios=self.cfgs.ANCHOR_RATIOS,
featuremap_height=featuremap_height,
featuremap_width=featuremap_width,
stride=self.cfgs.ANCHOR_STRIDE,
name="make_anchors_forRPN")
return anchors
def build_loss(self, rpn_box_pred, rpn_bbox_targets, rpn_cls_score, rpn_labels,
bbox_pred_h, bbox_targets_h, cls_score_h, bbox_pred_r, bbox_targets_r, rois, target_gt_r,
cls_score_r, labels, mask_gt, pa_mask_pred):
'''
:param rpn_box_pred: [-1, 4]
:param rpn_bbox_targets: [-1, 4]
:param rpn_cls_score: [-1]
:param rpn_labels: [-1]
:param bbox_pred_h: [-1, 4*(cls_num+1)]
:param bbox_targets_h: [-1, 4*(cls_num+1)]
:param cls_score_h: [-1, cls_num+1]
:param bbox_pred_r: [-1, 5*(cls_num+1)]
:param bbox_targets_r: [-1, 5*(cls_num+1)]
:param cls_score_r: [-1, cls_num+1]
:param labels: [-1]
:return:
'''
with tf.variable_scope('build_loss'):
with tf.variable_scope('rpn_loss'):
rpn_reg_loss = self.losses.smooth_l1_loss_rpn(bbox_pred=rpn_box_pred,
bbox_targets=rpn_bbox_targets,
label=rpn_labels,
sigma=self.cfgs.RPN_SIGMA)
rpn_select = tf.reshape(tf.where(tf.not_equal(rpn_labels, -1)), [-1])
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
rpn_labels = tf.reshape(tf.gather(rpn_labels, rpn_select), [-1])
rpn_cls_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score,
labels=rpn_labels))
self.losses_dict['rpn_cls_loss'] = rpn_cls_loss * self.cfgs.RPN_CLASSIFICATION_LOSS_WEIGHT
self.losses_dict['rpn_reg_loss'] = rpn_reg_loss * self.cfgs.RPN_LOCATION_LOSS_WEIGHT
with tf.variable_scope('FastRCNN_loss'):
reg_loss_h = self.losses.smooth_l1_loss_rcnn_h(bbox_pred=bbox_pred_h,
bbox_targets=bbox_targets_h,
label=labels,
num_classes=self.cfgs.CLASS_NUM + 1,
sigma=self.cfgs.FASTRCNN_SIGMA)
if self.cfgs.USE_IOU_FACTOR:
reg_loss_r = self.losses.iou_smooth_l1_loss_rcnn_r(bbox_pred=bbox_pred_r,
bbox_targets=bbox_targets_r,
label=labels,
rois=rois, target_gt_r=target_gt_r,
num_classes=self.cfgs.CLASS_NUM + 1,
sigma=self.cfgs.FASTRCNN_SIGMA)
else:
reg_loss_r = self.losses.smooth_l1_loss_rcnn_r(bbox_pred=bbox_pred_r,
bbox_targets=bbox_targets_r,
label=labels,
num_classes=self.cfgs.CLASS_NUM + 1,
sigma=self.cfgs.FASTRCNN_SIGMA)
cls_loss_h = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=cls_score_h,
                    labels=labels))  # because we already sampled before
cls_loss_r = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=cls_score_r,
labels=labels))
self.losses_dict['fast_cls_loss_h'] = cls_loss_h * self.cfgs.FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT
self.losses_dict['fast_reg_loss_h'] = reg_loss_h * self.cfgs.FAST_RCNN_LOCATION_LOSS_WEIGHT
self.losses_dict['fast_cls_loss_r'] = cls_loss_r * self.cfgs.FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT
self.losses_dict['fast_reg_loss_r'] = reg_loss_r * self.cfgs.FAST_RCNN_LOCATION_LOSS_WEIGHT
with tf.variable_scope('build_attention_loss',
regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
attention_loss = self.losses.build_attention_loss(mask_gt, pa_mask_pred)
self.losses_dict['attention_loss'] = attention_loss
def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None,
mask_batch=None, gpu_id=0):
if self.is_training:
gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)
gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)
img_shape = tf.shape(input_img_batch)
# 1. build backbone
feature, pa_mask = self.build_backbone(input_img_batch)
# 2. build rpn
rpn_box_pred, rpn_cls_score, rpn_cls_prob = self.rpn(feature)
rpn_box_pred = tf.reshape(rpn_box_pred, [-1, 4])
rpn_cls_score = tf.reshape(rpn_cls_score, [-1, 2])
rpn_cls_prob = slim.softmax(rpn_cls_score, scope='rpn_cls_prob')
# 3. generate anchors
anchors = self.make_anchors(feature)
# 4. postprocess rpn proposals. such as: decode, clip, NMS
with tf.variable_scope('postprocess_RPN'):
rois, roi_scores = self.postprocess_rpn_proposals(rpn_bbox_pred=rpn_box_pred,
rpn_cls_prob=rpn_cls_prob,
img_shape=img_shape,
anchors=anchors,
is_training=self.is_training)
# 5. sample minibatch
if self.is_training:
with tf.variable_scope('sample_anchors_minibatch'):
rpn_labels, rpn_bbox_targets = \
tf.py_func(
self.anchor_sampler_r2cnn.anchor_target_layer,
[gtboxes_batch_h, img_shape, anchors],
[tf.float32, tf.float32])
rpn_bbox_targets = tf.reshape(rpn_bbox_targets, [-1, 4])
rpn_labels = tf.to_int32(rpn_labels, name="to_int32")
rpn_labels = tf.reshape(rpn_labels, [-1])
self.add_anchor_img_smry(input_img_batch, anchors, rpn_labels, method=0)
rpn_cls_category = tf.argmax(rpn_cls_prob, axis=1)
kept_rpppn = tf.reshape(tf.where(tf.not_equal(rpn_labels, -1)), [-1])
rpn_cls_category = tf.gather(rpn_cls_category, kept_rpppn)
acc = tf.reduce_mean(tf.to_float(tf.equal(rpn_cls_category,
tf.to_int64(tf.gather(rpn_labels, kept_rpppn)))))
tf.summary.scalar('ACC/fpn_accuracy', acc)
with tf.control_dependencies([rpn_labels]):
with tf.variable_scope('sample_RCNN_minibatch'):
rois, labels, bbox_targets_h, bbox_targets_r, target_gt_h, target_gt_r = \
tf.py_func(self.proposal_sampler_r2cnn.proposal_target_layer,
[rois, gtboxes_batch_h, gtboxes_batch_r],
[tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
rois = tf.reshape(rois, [-1, 4])
labels = tf.to_int32(labels)
labels = tf.reshape(labels, [-1])
bbox_targets_h = tf.reshape(bbox_targets_h, [-1, 4 * (self.cfgs.CLASS_NUM + 1)])
bbox_targets_r = tf.reshape(bbox_targets_r, [-1, 5 * (self.cfgs.CLASS_NUM + 1)])
self.add_roi_batch_img_smry(input_img_batch, rois, labels, method=0)
# 6. build Fast-RCNN, include roi align/pooling, box head
bbox_pred_h, cls_score_h, bbox_pred_r, cls_score_r = self.box_head.fc_head(self.roi_extractor, rois, feature,
img_shape, self.is_training, mode=0)
cls_prob_h = slim.softmax(cls_score_h, 'cls_prob_h')
cls_prob_r = slim.softmax(cls_score_r, 'cls_prob_r')
if self.is_training:
cls_category_h = tf.argmax(cls_prob_h, axis=1)
fast_acc_h = tf.reduce_mean(tf.to_float(tf.equal(cls_category_h, tf.to_int64(labels))))
tf.summary.scalar('ACC/fast_acc_h', fast_acc_h)
cls_category_r = tf.argmax(cls_prob_r, axis=1)
fast_acc_r = tf.reduce_mean(tf.to_float(tf.equal(cls_category_r, tf.to_int64(labels))))
tf.summary.scalar('ACC/fast_acc_r', fast_acc_r)
# 8. build loss
if self.is_training:
self.build_loss(rpn_box_pred=rpn_box_pred,
rpn_bbox_targets=rpn_bbox_targets,
rpn_cls_score=rpn_cls_score,
rpn_labels=rpn_labels,
bbox_pred_h=bbox_pred_h,
bbox_targets_h=bbox_targets_h,
cls_score_h=cls_score_h,
bbox_pred_r=bbox_pred_r,
bbox_targets_r=bbox_targets_r,
rois=rois,
target_gt_r=target_gt_r,
cls_score_r=cls_score_r,
labels=labels,
mask_gt=mask_batch,
pa_mask_pred=pa_mask)
# 9. postprocess_fastrcnn
final_boxes_h, final_scores_h, final_category_h = self.postprocess_fastrcnn_h(rois=rois,
bbox_ppred=bbox_pred_h,
scores=cls_prob_h,
img_shape=img_shape)
final_boxes_r, final_scores_r, final_category_r = self.postprocess_fastrcnn_r(rois=rois,
bbox_ppred=bbox_pred_r,
scores=cls_prob_r,
gpu_id=gpu_id)
if self.is_training:
return final_boxes_h, final_scores_h, final_category_h, \
final_boxes_r, final_scores_r, final_category_r, self.losses_dict
else:
return final_boxes_h, final_scores_h, final_category_h, \
final_boxes_r, final_scores_r, final_category_r,
def postprocess_fastrcnn_r(self, rois, bbox_ppred, scores, gpu_id):
'''
:param rois:[-1, 4]
:param bbox_ppred: [-1, (cfgs.Class_num+1) * 5]
:param scores: [-1, cfgs.Class_num + 1]
:return:
'''
with tf.name_scope('postprocess_fastrcnn'):
rois = tf.stop_gradient(rois)
scores = tf.stop_gradient(scores)
bbox_ppred = tf.reshape(bbox_ppred, [-1, self.cfgs.CLASS_NUM + 1, 5])
bbox_ppred = tf.stop_gradient(bbox_ppred)
bbox_pred_list = tf.unstack(bbox_ppred, axis=1)
score_list = tf.unstack(scores, axis=1)
allclasses_boxes = []
allclasses_scores = []
categories = []
x_c = (rois[:, 2] + rois[:, 0]) / 2
y_c = (rois[:, 3] + rois[:, 1]) / 2
h = rois[:, 2] - rois[:, 0] + 1
w = rois[:, 3] - rois[:, 1] + 1
theta = -90 * tf.ones_like(x_c)
rois = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))
for i in range(1, self.cfgs.CLASS_NUM + 1):
# 1. decode boxes in each class
tmp_encoded_box = bbox_pred_list[i]
tmp_score = score_list[i]
tmp_decoded_boxes = bbox_transform.rbbox_transform_inv(boxes=rois, deltas=tmp_encoded_box,
scale_factors=self.cfgs.ROI_SCALE_FACTORS)
# 2. clip to img boundaries
# tmp_decoded_boxes = boxes_utils.clip_boxes_to_img_boundaries(decode_boxes=tmp_decoded_boxes,
# img_shape=img_shape)
# 3. NMS
if self.cfgs.SOFT_NMS:
print("Using Soft NMS.......")
                    raise NotImplementedError("soft NMS for rotate has not been implemented")
else:
keep = nms_rotate.nms_rotate(decode_boxes=tmp_decoded_boxes,
scores=tmp_score,
iou_threshold=self.cfgs.FAST_RCNN_R_NMS_IOU_THRESHOLD,
max_output_size=self.cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
use_gpu=self.cfgs.ROTATE_NMS_USE_GPU,
gpu_id=gpu_id)
perclass_boxes = tf.gather(tmp_decoded_boxes, keep)
perclass_scores = tf.gather(tmp_score, keep)
allclasses_boxes.append(perclass_boxes)
allclasses_scores.append(perclass_scores)
categories.append(tf.ones_like(perclass_scores) * i)
final_boxes = tf.concat(allclasses_boxes, axis=0)
final_scores = tf.concat(allclasses_scores, axis=0)
final_category = tf.concat(categories, axis=0)
if self.is_training:
'''
                in training. We should show the detections in the tensorboard. So we add this.
'''
kept_indices = tf.reshape(tf.where(tf.greater_equal(final_scores, self.cfgs.VIS_SCORE)), [-1])
else:
kept_indices = tf.reshape(tf.where(tf.greater_equal(final_scores, self.cfgs.FILTERED_SCORE)), [-1])
final_boxes = tf.gather(final_boxes, kept_indices)
final_scores = tf.gather(final_scores, kept_indices)
final_category = tf.gather(final_category, kept_indices)
return final_boxes, final_scores, final_category
def postprocess_fastrcnn_h(self, rois, bbox_ppred, scores, img_shape):
'''
:param rois:[-1, 4]
:param bbox_ppred: [-1, (cfgs.Class_num+1) * 4]
:param scores: [-1, cfgs.Class_num + 1]
:return:
'''
with tf.name_scope('postprocess_fastrcnn_h'):
rois = tf.stop_gradient(rois)
scores = tf.stop_gradient(scores)
bbox_ppred = tf.reshape(bbox_ppred, [-1, self.cfgs.CLASS_NUM + 1, 4])
bbox_ppred = tf.stop_gradient(bbox_ppred)
bbox_pred_list = tf.unstack(bbox_ppred, axis=1)
score_list = tf.unstack(scores, axis=1)
allclasses_boxes = []
allclasses_scores = []
categories = []
for i in range(1, self.cfgs.CLASS_NUM + 1):
# 1. decode boxes in each class
tmp_encoded_box = bbox_pred_list[i]
tmp_score = score_list[i]
tmp_decoded_boxes = bbox_transform.bbox_transform_inv(boxes=rois, deltas=tmp_encoded_box,
scale_factors=self.cfgs.ROI_SCALE_FACTORS)
# 2. clip to img boundaries
tmp_decoded_boxes = clip_boxes_to_img_boundaries(decode_boxes=tmp_decoded_boxes,
img_shape=img_shape)
# 3. NMS
max_output_size = 4000 if 'DOTA' in self.cfgs.NET_NAME else 200
keep = tf.image.non_max_suppression(
boxes=tmp_decoded_boxes,
scores=tmp_score,
max_output_size=100 if self.is_training else max_output_size,
iou_threshold=self.cfgs.FAST_RCNN_H_NMS_IOU_THRESHOLD)
perclass_boxes = tf.gather(tmp_decoded_boxes, keep)
perclass_scores = tf.gather(tmp_score, keep)
allclasses_boxes.append(perclass_boxes)
allclasses_scores.append(perclass_scores)
categories.append(tf.ones_like(perclass_scores) * i)
final_boxes = tf.concat(allclasses_boxes, axis=0)
final_scores = tf.concat(allclasses_scores, axis=0)
final_category = tf.concat(categories, axis=0)
            if self.is_training:
                '''
                in training. We should show the detections in the tensorboard. So we add this.
                '''
kept_indices = tf.reshape(tf.where(tf.greater_equal(final_scores, self.cfgs.VIS_SCORE)), [-1])
else:
kept_indices = tf.reshape(tf.where(tf.greater_equal(final_scores, self.cfgs.FILTERED_SCORE)), [-1])
final_boxes = tf.gather(final_boxes, kept_indices)
final_scores = tf.gather(final_scores, kept_indices)
final_category = tf.gather(final_category, kept_indices)
return final_boxes, final_scores, final_category
| 53.572519 | 121 | 0.544125 |
c29a2b2b3f3cce6d71365a0fa58861d1769fd702 | 433 | py | Python | setup.py | majkrzak/kot | 1ef7ee448d460bb46613c8400743b7c4185a2ed2 | [
"MIT"
] | 1 | 2019-10-06T12:00:41.000Z | 2019-10-06T12:00:41.000Z | setup.py | majkrzak/kot | 1ef7ee448d460bb46613c8400743b7c4185a2ed2 | [
"MIT"
] | 14 | 2019-10-06T12:31:11.000Z | 2019-10-16T08:05:33.000Z | setup.py | majkrzak/kot | 1ef7ee448d460bb46613c8400743b7c4185a2ed2 | [
"MIT"
] | 4 | 2019-10-06T12:41:18.000Z | 2019-10-08T01:57:21.000Z | import setuptools
setuptools.setup(
name='kot',
    author='Piotr Majkrzak',
author_email='piotr@majkrzak.dev',
license='MIT',
data_files = [("", ["LICENSE"])],
classifiers=[
'License :: OSI Approved :: MIT License',
],
packages=[
'kot',
],
package_dir={'kot': './src'},
install_requires=[
'requests',
'lxml'
],
setup_requires=[
'wheel'
]
)
| 18.041667 | 49 | 0.51963 |
b4c2cbdf4813415960364a715dba4fb618d7ce93 | 7,138 | py | Python | aim/web/api/runs/utils.py | gorarakelyan/aim | eed0ac76f4bdcc81277cef4a4de1dfc3dd690644 | [
"Apache-2.0"
] | null | null | null | aim/web/api/runs/utils.py | gorarakelyan/aim | eed0ac76f4bdcc81277cef4a4de1dfc3dd690644 | [
"Apache-2.0"
] | null | null | null | aim/web/api/runs/utils.py | gorarakelyan/aim | eed0ac76f4bdcc81277cef4a4de1dfc3dd690644 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import struct
from typing import Iterator, Tuple, Optional, List
from aim.storage.context import Context
from aim.sdk.run import Run
from aim.sdk.metric import Metric
from aim.sdk.metric import MetricCollection
from aim.web.api.runs.pydantic_models import AlignedRunIn, TraceBase
from aim.storage.treeutils import encode_tree
def get_run_props(run: Run):
return {
'name': run.name if run.name else None,
'experiment': run.experiment.name if run.experiment else None,
'tags': [{'id': tag.uuid,
'name': tag.name,
'color': tag.color,
'description': tag.description}
for tag in run.props.tags],
'archived': run.archived if run.archived else False,
'creation_time': run.creation_time,
'end_time': run.end_time
}
def numpy_to_encodable(array: np.ndarray) -> Optional[dict]:
encoded_numpy = {
'type': 'numpy',
'shape': array.shape[0],
'dtype': 'float64', # hardcoded for now
}
if array.dtype == 'float64':
encoded_numpy['blob'] = array.tobytes()
elif array.dtype == 'object':
return None
else:
encoded_numpy['blob'] = array.astype('float64').tobytes()
return encoded_numpy
def sliced_np_array(array: np.ndarray, _slice: slice) -> np.ndarray:
last_step_needed = (_slice.stop - 1) % _slice.step != 0
if last_step_needed:
return np.append(array[_slice], array[-1])
else:
return array[_slice]
def sliced_array(array: list, _slice: slice) -> list:
last_step_needed = (_slice.stop - 1) % _slice.step != 0
if last_step_needed:
last_value = array[-1]
return array[_slice] + [last_value]
else:
return array[_slice]
def collect_x_axis_data(x_trace: Metric, iters: np.ndarray) -> Tuple[Optional[dict], Optional[dict]]:
if not x_trace:
return None, None
x_axis_values = []
x_axis_iters = []
for idx in iters:
x_val = x_trace.values[idx.item()]
if x_val:
x_axis_iters.append(idx.item())
x_axis_values.append(x_val)
if not x_axis_iters:
return None, None
return numpy_to_encodable(np.array(x_axis_iters, dtype='float64')),\
numpy_to_encodable(np.array(x_axis_values, dtype='float64'))
def collect_run_streamable_data(encoded_tree: Iterator[Tuple[bytes, bytes]]) -> bytes:
result = bytes()
for key, val in encoded_tree:
result += struct.pack('I', len(key)) + key + struct.pack('I', len(val)) + val
return result
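# Illustrative sketch (not part of the original module): the bytes produced by
# collect_run_streamable_data() are a flat sequence of length-prefixed key/value
# pairs -- an unsigned int length (struct format 'I'), the key bytes, another
# length, then the value bytes. The helper below shows how a client could walk
# that framing; it is not used by the API handlers themselves.
def _iter_streamed_pairs_example(stream: bytes) -> Iterator[Tuple[bytes, bytes]]:
    offset = 0
    int_size = struct.calcsize('I')
    while offset < len(stream):
        (key_len,) = struct.unpack_from('I', stream, offset)
        offset += int_size
        key = stream[offset:offset + key_len]
        offset += key_len
        (val_len,) = struct.unpack_from('I', stream, offset)
        offset += int_size
        val = stream[offset:offset + val_len]
        offset += val_len
        yield key, val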
def custom_aligned_metrics_streamer(requested_runs: List[AlignedRunIn], x_axis: str) -> bytes:
for run_data in requested_runs:
run_hashname = run_data.run_id
requested_traces = run_data.traces
run = Run(hashname=run_hashname, read_only=True)
traces_list = []
for trace_data in requested_traces:
context = Context(trace_data.context)
trace = run.get_metric(metric_name=trace_data.metric_name,
context=context)
x_axis_trace = run.get_metric(metric_name=x_axis,
context=context)
if not (trace and x_axis_trace):
continue
_slice = slice(*trace_data.slice)
iters = trace.values.sparse_numpy()[0]
sliced_iters = sliced_np_array(iters, _slice)
x_axis_iters, x_axis_values = collect_x_axis_data(x_axis_trace, sliced_iters)
traces_list.append({
'metric_name': trace.name,
'context': trace.context.to_dict(),
'x_axis_values': x_axis_values,
'x_axis_iters': x_axis_iters,
})
run_dict = {
run_hashname: traces_list
}
encoded_tree = encode_tree(run_dict)
yield collect_run_streamable_data(encoded_tree)
def metric_search_result_streamer(traces: MetricCollection, steps_num: int, x_axis: Optional[str]) -> bytes:
for run_trace_collection in traces.iter_runs():
run = None
traces_list = []
for trace in run_trace_collection.iter():
if not run:
run = run_trace_collection.run
iters, values = trace.values.sparse_numpy()
num_records = len(values)
step = (num_records // steps_num) or 1
_slice = slice(0, num_records, step)
sliced_iters = sliced_np_array(iters, _slice)
x_axis_trace = run.get_metric(x_axis, trace.context) if x_axis else None
x_axis_iters, x_axis_values = collect_x_axis_data(x_axis_trace, sliced_iters)
traces_list.append({
'metric_name': trace.name,
'context': trace.context.to_dict(),
'slice': [0, num_records, step],
'values': numpy_to_encodable(sliced_np_array(values, _slice)),
'iters': numpy_to_encodable(sliced_iters),
'epochs': numpy_to_encodable(sliced_np_array(trace.epochs.values_numpy(), _slice)),
'timestamps': numpy_to_encodable(sliced_np_array(trace.timestamps.values_numpy(), _slice)),
'x_axis_values': x_axis_values,
'x_axis_iters': x_axis_iters,
})
if run:
run_dict = {
run.hashname: {
'params': run.get(...),
'traces': traces_list,
'props': get_run_props(run)
}
}
encoded_tree = encode_tree(run_dict)
yield collect_run_streamable_data(encoded_tree)
def run_search_result_streamer(runs: MetricCollection, limit: int) -> bytes:
run_count = 0
for run_trace_collection in runs.iter_runs():
run = run_trace_collection.run
run_dict = {
run.hashname: {
'params': run.get(...),
'traces': run.collect_metrics_info(),
'props': get_run_props(run)
}
}
encoded_tree = encode_tree(run_dict)
yield collect_run_streamable_data(encoded_tree)
run_count += 1
if limit and run_count >= limit:
break
def collect_requested_traces(run: Run, requested_traces: List[TraceBase], steps_num: int = 200) -> List[dict]:
processed_traces_list = []
for requested_trace in requested_traces:
metric_name = requested_trace.metric_name
context = Context(requested_trace.context)
trace = run.get_metric(metric_name=metric_name, context=context)
if not trace:
continue
iters, values = trace.values.sparse_list()
num_records = len(values)
step = (num_records // steps_num) or 1
_slice = slice(0, num_records, step)
processed_traces_list.append({
'metric_name': trace.name,
'context': trace.context.to_dict(),
'values': sliced_array(values, _slice),
'iters': sliced_array(iters, _slice),
})
return processed_traces_list
| 34.990196 | 110 | 0.609414 |
7c7d26701cee124b0f80e46d6a66642c9a44b347 | 126 | py | Python | discoursesimplification/utils/ID_generator.py | kkatsamaktsis/PyDiscourseSimplification | 18d247894355b4b51f5abcced86e7a7292b17ac0 | [
"MIT"
] | null | null | null | discoursesimplification/utils/ID_generator.py | kkatsamaktsis/PyDiscourseSimplification | 18d247894355b4b51f5abcced86e7a7292b17ac0 | [
"MIT"
] | null | null | null | discoursesimplification/utils/ID_generator.py | kkatsamaktsis/PyDiscourseSimplification | 18d247894355b4b51f5abcced86e7a7292b17ac0 | [
"MIT"
] | null | null | null | import uuid
class IDGenerator:
@staticmethod
def generate_uuid():
return str(uuid.uuid4()).replace("-", "")
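# Usage sketch (not part of the original module): generate_uuid() returns the
# 32 hexadecimal characters of a random UUID4 with the dashes stripped out.
if __name__ == "__main__":
    generated = IDGenerator.generate_uuid()
    print(generated)       # e.g. '3f2b8c0e9a6d4e21b7c5d0f1a2e3b4c5' (random every run)
    print(len(generated))  # 32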
| 15.75 | 49 | 0.626984 |
795a60daa3fea4af0f505d9ce73f9436f32951c8 | 2,618 | py | Python | tests/rules/test_git_push_pull.py | WorkInProgress-Development/theplease | 9b9a2dcee3efa0e1b4f197fc55904c9327dc13ba | [
"MIT"
] | null | null | null | tests/rules/test_git_push_pull.py | WorkInProgress-Development/theplease | 9b9a2dcee3efa0e1b4f197fc55904c9327dc13ba | [
"MIT"
] | null | null | null | tests/rules/test_git_push_pull.py | WorkInProgress-Development/theplease | 9b9a2dcee3efa0e1b4f197fc55904c9327dc13ba | [
"MIT"
] | null | null | null | import pytest
from theplease.rules.git_push_pull import match, get_new_command
from theplease.types import Command
git_err = '''
To /tmp/foo
! [rejected] master -> master (non-fast-forward)
error: failed to push some refs to '/tmp/bar'
hint: Updates were rejected because the tip of your current branch is behind
hint: its remote counterpart. Integrate the remote changes (e.g.
hint: 'git pull ...') before pushing again.
hint: See the 'Note about fast-forwards' in 'git push --help' for details.
'''
git_err2 = '''
To /tmp/foo
! [rejected] master -> master (non-fast-forward)
error: failed to push some refs to '/tmp/bar'
hint: Updates were rejected because the remote contains work that you do
hint: not have locally. This is usually caused by another repository pushing
hint: to the same ref. You may want to first integrate the remote changes
hint: (e.g., 'git pull ...') before pushing again.
hint: See the 'Note about fast-forwards' in 'git push --help' for details.
'''
git_uptodate = 'Everything up-to-date'
git_ok = '''
Counting objects: 3, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (2/2), done.
Writing objects: 100% (3/3), 282 bytes | 0 bytes/s, done.
Total 3 (delta 0), reused 0 (delta 0)
To /tmp/bar
514eed3..f269c79 master -> master
'''
@pytest.mark.parametrize('command', [
Command('git push', git_err),
Command('git push nvbn', git_err),
Command('git push nvbn master', git_err),
Command('git push', git_err2),
Command('git push nvbn', git_err2),
Command('git push nvbn master', git_err2)])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('git push', git_ok),
Command('git push', git_uptodate),
Command('git push nvbn', git_ok),
Command('git push nvbn master', git_uptodate),
Command('git push nvbn', git_ok),
Command('git push nvbn master', git_uptodate)])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, output', [
(Command('git push', git_err), 'git pull && git push'),
(Command('git push nvbn', git_err),
'git pull nvbn && git push nvbn'),
(Command('git push nvbn master', git_err),
'git pull nvbn master && git push nvbn master'),
(Command('git push', git_err2), 'git pull && git push'),
(Command('git push nvbn', git_err2),
'git pull nvbn && git push nvbn'),
(Command('git push nvbn master', git_err2),
'git pull nvbn master && git push nvbn master')])
def test_get_new_command(command, output):
assert get_new_command(command) == output
| 35.378378 | 77 | 0.689076 |
0830e8d5aeea10e0c2437007c6d9fbaf7b16ac1c | 986 | py | Python | tests/test_deprecations.py | MattToast/SmartSim | 4bd5e231445abd9b888561930db859062708678a | [
"BSD-2-Clause"
] | null | null | null | tests/test_deprecations.py | MattToast/SmartSim | 4bd5e231445abd9b888561930db859062708678a | [
"BSD-2-Clause"
] | null | null | null | tests/test_deprecations.py | MattToast/SmartSim | 4bd5e231445abd9b888561930db859062708678a | [
"BSD-2-Clause"
] | null | null | null | import pytest
from smartsim.database import (
CobaltOrchestrator,
LSFOrchestrator,
PBSOrchestrator,
SlurmOrchestrator,
)
tf_available = True
try:
import tensorflow
except ImportError:
tf_available = False
def test_deprecated_orchestrators(wlmutils):
with pytest.deprecated_call():
_ = SlurmOrchestrator(interface=wlmutils.get_test_interface())
with pytest.deprecated_call():
_ = LSFOrchestrator(interface=wlmutils.get_test_interface())
with pytest.deprecated_call():
_ = CobaltOrchestrator(interface=wlmutils.get_test_interface())
with pytest.deprecated_call():
_ = PBSOrchestrator(interface=wlmutils.get_test_interface())
@pytest.mark.skipif(not tf_available, reason="Requires TF to run")
def test_deprecated_tf():
with pytest.deprecated_call():
from smartsim.tf import freeze_model
def test_deprecated_constants():
with pytest.deprecated_call():
from smartsim import constants
| 24.65 | 71 | 0.741379 |
32ee58937d90dd04aec0da61c72555dc6b062f04 | 1,501 | py | Python | sources_oop.py | Joefdez/gravitaionalLensing1 | 213e1e62b4f2d3881f3e2df64ea8d09998eb7189 | [
"MIT"
] | null | null | null | sources_oop.py | Joefdez/gravitaionalLensing1 | 213e1e62b4f2d3881f3e2df64ea8d09998eb7189 | [
"MIT"
] | null | null | null | sources_oop.py | Joefdez/gravitaionalLensing1 | 213e1e62b4f2d3881f3e2df64ea8d09998eb7189 | [
"MIT"
] | null | null | null | from source_generators import * # includes numpy import, np
import matplotlib.pylab as plt
class modelSource ():
'Source class to represent objects to be lensed'
def __init__(self, name, stype, side, radius=0.0, aspectRatio = 1.0, maxLum = 1.0):
""" Constructor method """
self.name = name
self.type = stype
self.aspectRatio = aspectRatio
self.maxLum = maxLum
if aspectRatio == 1.0:
self.xsize, self.ysize = side, side
else:
self.xsize, self.ysize = side, side*aspectRatio
self.radius = radius
if stype == "square":
self.view = square_source( radius, self.xsize, self.ysize, maxLum )
elif stype == "circular":
self.view = circular_source( radius, self.xsize, self.ysize)
elif stype == "discs":
self.view = discs_source( radius, self.xsize, self.ysize)
self.lensedView = None
print "Source array " + self.name + " generated."
def plotSource(self):
""" Plot the source """
plt.figure('lens') #Declare figure
ax1=plt.axes() #Declare axis
ax1.xaxis.set_ticklabels([]) #Remove ticks
ax1.yaxis.set_ticklabels([])
#plt.figtext(-2.5, -2.5, pn)
#plt.title(pn,loc='center')
plt.imshow(self.view)
class imageSource():
    'Class for handling actual images as sources'
    def __init__(self, file):
        # Remember to open and close the image file properly
        pass
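# Usage sketch (not part of the original file): build a small circular model source
# and display it. This assumes circular_source() from source_generators (imported at
# the top) returns a 2D intensity array of size (xsize, ysize), as the class above expects.
if __name__ == "__main__":
    demo = modelSource("demo", "circular", side=200, radius=50)
    demo.plotSource()
    plt.show()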
| 30.632653 | 87 | 0.593604 |
6ec6852d793e6bb34994bd1cceab2494ed84c024 | 1,737 | py | Python | src/condor_tests/test_htcondor_dags/writer/test_subdag_edges.py | shamoya/htcondor | c3bbc0eb8f72b863eda2d6d0a2e92594f7346b02 | [
"Apache-2.0"
] | null | null | null | src/condor_tests/test_htcondor_dags/writer/test_subdag_edges.py | shamoya/htcondor | c3bbc0eb8f72b863eda2d6d0a2e92594f7346b02 | [
"Apache-2.0"
] | null | null | null | src/condor_tests/test_htcondor_dags/writer/test_subdag_edges.py | shamoya/htcondor | c3bbc0eb8f72b863eda2d6d0a2e92594f7346b02 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from .conftest import s, dagfile_lines
def test_one_parent_one_child(dag, writer):
parent = dag.subdag(name="parent", dag_file="parent.dag")
child = parent.child_subdag(name="child", dag_file="child.dag")
assert "PARENT parent CHILD child" in dagfile_lines(writer)
def test_two_parents_one_child(dag, writer):
parent1 = dag.subdag(name="parent1", dag_file="parent.dag")
parent2 = dag.subdag(name="parent2", dag_file="parent.dag")
child = parent1.child_subdag(name="child", dag_file="child.dag")
child.add_parents(parent2)
lines = dagfile_lines(writer)
assert "PARENT parent1 CHILD child" in lines
assert "PARENT parent2 CHILD child" in lines
def test_one_parent_two_children(dag, writer):
parent1 = dag.subdag(name="parent", dag_file="parent.dag")
child1 = parent1.child_subdag(name="child1", dag_file="child.dag")
child2 = parent1.child_subdag(name="child2", dag_file="child.dag")
lines = dagfile_lines(writer)
assert "PARENT parent CHILD child1" in lines
assert "PARENT parent CHILD child2" in lines
| 36.957447 | 74 | 0.743811 |
41ded2cad2689c283588b8cc60a59360703cfd05 | 4,527 | py | Python | goldcoin/pools/pool_wallet_info.py | DevMau5x/goldcoin-blockchain-2 | ed223dd16fa290ea710db7202d6c52a056242cfa | [
"Apache-2.0"
] | 17 | 2021-09-08T17:07:54.000Z | 2022-03-30T04:11:58.000Z | goldcoin/pools/pool_wallet_info.py | DevMau5x/goldcoin-blockchain-2 | ed223dd16fa290ea710db7202d6c52a056242cfa | [
"Apache-2.0"
] | 15 | 2021-09-28T21:09:49.000Z | 2022-03-22T21:13:23.000Z | goldcoin/pools/pool_wallet_info.py | Pierre21dd/gold2 | 4a35f207ed4c8a7745bfbc73fd3c190bd8b60a3f | [
"Apache-2.0"
] | 9 | 2021-09-12T10:03:23.000Z | 2022-03-15T08:35:11.000Z | from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from goldcoin.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from goldcoin.types.blockchain_format.coin import Coin
from goldcoin.types.blockchain_format.program import Program
from goldcoin.types.blockchain_format.sized_bytes import bytes32
from goldcoin.util.byte_types import hexstr_to_bytes
from goldcoin.util.ints import uint32, uint8
from goldcoin.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
"""
From the user's point of view, a pool group can be in these states:
`SELF_POOLING`: The singleton exists on the blockchain, and we are farming
block rewards to a wallet address controlled by the user
`LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.
`FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.
`CLAIMING_SELF_POOLED_REWARDS`: We have submitted a transaction to sweep our
self-pooled funds.
"""
SELF_POOLING = 1
LEAVING_POOL = 2
FARMING_TO_POOL = 3
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@dataclass(frozen=True)
@streamable
class PoolState(Streamable):
"""
`PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
`target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
`target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
`relative_lock_height` is zero when in SELF_POOLING state
"""
version: uint8
state: uint8 # PoolSingletonState
# `target_puzzle_hash`: A puzzle_hash we pay to
# When self-farming, this is a main wallet address
# When farming-to-pool, the pool sends this to the farmer during pool protocol setup
target_puzzle_hash: bytes32 # TODO: rename target_puzzle_hash -> pay_to_address
# owner_pubkey is set by the wallet, once
owner_pubkey: G1Element
pool_url: Optional[str]
relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
state_str = state_dict["state"]
singleton_state: PoolSingletonState = PoolSingletonState[state_str]
if singleton_state == SELF_POOLING:
target_puzzle_hash = owner_puzzle_hash
pool_url: str = ""
relative_lock_height = uint32(0)
elif singleton_state == FARMING_TO_POOL:
target_puzzle_hash = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
pool_url = state_dict["pool_url"]
relative_lock_height = uint32(state_dict["relative_lock_height"])
else:
raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
# TODO: change create_pool_state to return error messages, as well
assert relative_lock_height is not None
return create_pool_state(singleton_state, target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height)
def create_pool_state(
state: PoolSingletonState,
target_puzzle_hash: bytes32,
owner_pubkey: G1Element,
pool_url: Optional[str],
relative_lock_height: uint32,
) -> PoolState:
if state not in set(s.value for s in PoolSingletonState):
raise AssertionError("state {state} is not a valid PoolSingletonState,")
ps = PoolState(
POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
)
# TODO Move verify here
return ps
@dataclass(frozen=True)
@streamable
class PoolWalletInfo(Streamable):
"""
Internal Pool Wallet state, not destined for the blockchain. This can be completely derived with
the Singleton's CoinSpends list, or with the information from the WalletPoolStore.
"""
current: PoolState
target: Optional[PoolState]
launcher_coin: Coin
launcher_id: bytes32
p2_singleton_puzzle_hash: bytes32
current_inner: Program # Inner puzzle in current singleton, not revealed yet
tip_singleton_coin_id: bytes32
singleton_block_height: uint32 # Block height that current PoolState is from
| 39.025862 | 117 | 0.759222 |
cb34afdbeb2d437767063fef7996f3093f16732c | 8,179 | py | Python | sdk/python/pulumi_azure_native/securityandcompliance/latest/private_endpoint_connections_adt_api.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/securityandcompliance/latest/private_endpoint_connections_adt_api.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/securityandcompliance/latest/private_endpoint_connections_adt_api.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnectionsAdtAPI']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:securityandcompliance:PrivateEndpointConnectionsAdtAPI'.""", DeprecationWarning)
class PrivateEndpointConnectionsAdtAPI(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:securityandcompliance:PrivateEndpointConnectionsAdtAPI'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The Private Endpoint Connection resource.
Latest API Version: 2021-01-11.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the service instance.
:param pulumi.Input[str] resource_name_: The name of the service instance.
"""
pulumi.log.warn("""PrivateEndpointConnectionsAdtAPI is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:securityandcompliance:PrivateEndpointConnectionsAdtAPI'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['private_endpoint_connection_name'] = private_endpoint_connection_name
if private_link_service_connection_state is None and not opts.urn:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__['private_link_service_connection_state'] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__['resource_name'] = resource_name_
__props__['name'] = None
__props__['private_endpoint'] = None
__props__['provisioning_state'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityandcompliance/latest:PrivateEndpointConnectionsAdtAPI"), pulumi.Alias(type_="azure-native:securityandcompliance:PrivateEndpointConnectionsAdtAPI"), pulumi.Alias(type_="azure-nextgen:securityandcompliance:PrivateEndpointConnectionsAdtAPI"), pulumi.Alias(type_="azure-native:securityandcompliance/v20210111:PrivateEndpointConnectionsAdtAPI"), pulumi.Alias(type_="azure-nextgen:securityandcompliance/v20210111:PrivateEndpointConnectionsAdtAPI")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnectionsAdtAPI, __self__).__init__(
'azure-native:securityandcompliance/latest:PrivateEndpointConnectionsAdtAPI',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnectionsAdtAPI':
"""
Get an existing PrivateEndpointConnectionsAdtAPI resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["name"] = None
__props__["private_endpoint"] = None
__props__["private_link_service_connection_state"] = None
__props__["provisioning_state"] = None
__props__["system_data"] = None
__props__["type"] = None
return PrivateEndpointConnectionsAdtAPI(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Required property for system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 50.487654 | 538 | 0.697885 |
b2ed2c41e875080089f3e3aa514f5454512e494a | 2,653 | py | Python | setup.py | Neradoc/discotool | b242be28f490eabcbd115b6ca90f4c14e3b9184b | [
"MIT"
] | 16 | 2021-07-15T20:09:51.000Z | 2022-03-20T04:32:38.000Z | setup.py | Neradoc/discotool | b242be28f490eabcbd115b6ca90f4c14e3b9184b | [
"MIT"
] | 5 | 2021-04-03T06:34:48.000Z | 2022-02-16T18:01:28.000Z | setup.py | Neradoc/discotool | b242be28f490eabcbd115b6ca90f4c14e3b9184b | [
"MIT"
] | 1 | 2022-01-25T07:15:18.000Z | 2022-01-25T07:15:18.000Z | import os
import re
import setuptools
import subprocess
import sys
here = os.path.abspath(os.path.dirname(__file__))
repository_name = "Neradoc/discotool"
current_tag = subprocess.run("git describe --tags --abbrev=0",
capture_output = True,
encoding = "utf-8",
shell = True,
).stdout.strip()
with open(os.path.join(here,"README.md"), "r", encoding="utf-8") as fh:
long_description = fh.read()
# long_description = long_description.split("## Screenshots")[0].strip()
long_description = re.sub(r'\(docs/(.*.png)\)',
r'(https://raw.githubusercontent.com/' + repository_name
+ '/' + current_tag + r'/docs/\1)',
long_description)
long_description = re.sub(r'\(docs/(.*.md)\)',
r'(https://github.com/' + repository_name
+ '/blob/' + current_tag+r'/docs/\1)',
long_description)
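# Example of the rewrite above (illustrative values only, nothing is executed here):
# with repository_name "Neradoc/discotool" and a current_tag of, say, "1.0.0",
#   (docs/screenshot.png)  ->  (https://raw.githubusercontent.com/Neradoc/discotool/1.0.0/docs/screenshot.png)
#   (docs/usage.md)        ->  (https://github.com/Neradoc/discotool/blob/1.0.0/docs/usage.md)
# so relative README links keep working when the long description is rendered on PyPI.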
# with open(os.path.join(here,"requirements.txt"), "r", encoding="utf-8") as fp:
# required_modules = fp.read().split("\n")
#
# platform_req = os.path.join(here,f"requirements-{sys.platform}.txt")
# if os.path.exists(platform_req):
# with open(platform_req, "r", encoding="utf-8") as fp:
# required_modules += fp.read().split("\n")
# required_modules = [mod for mod in required_modules if mod]
required_modules = [
"click >= 7.1.2",
"click-aliases == 1.0.1",
"psutil >= 5.8.0",
"pyserial >= 3.4",
"wmi;platform_system=='Windows'",
"pywin32;platform_system=='Windows'",
"pyudev;platform_system=='Linux'",
]
setuptools.setup(
name="discotool",
author="Neradoc",
author_email="neraOnGit@ri1.fr",
description="Discover, list, and use USB microcontoller boards.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Neradoc/discotool",
license="MIT",
project_urls={
"Bug Tracker": "https://github.com/Neradoc/discotool/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
packages=setuptools.find_packages(where="."),
python_requires=">=3.6",
use_scm_version={
'write_to': 'discotool/_version.py'
},
setup_requires=["setuptools_scm"],
install_requires=required_modules,
entry_points={"console_scripts": ["discotool=discotool.discotool:main"]},
keywords="circuitpython, micropython",
)
| 33.1625 | 80 | 0.643423 |
110d4d3ef5a6d86a9c343a64fedadc4f1c685d0c | 1,128 | py | Python | kruiser_palace/users/tests/test_forms.py | nickblitz/kruisers_palace | bee4a14d3cdbc9501ec02d371199d648776065ee | [
"MIT"
] | null | null | null | kruiser_palace/users/tests/test_forms.py | nickblitz/kruisers_palace | bee4a14d3cdbc9501ec02d371199d648776065ee | [
"MIT"
] | null | null | null | kruiser_palace/users/tests/test_forms.py | nickblitz/kruisers_palace | bee4a14d3cdbc9501ec02d371199d648776065ee | [
"MIT"
] | null | null | null | import pytest
from kruiser_palace.users.forms import UserCreationForm
from kruiser_palace.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
| 26.857143 | 60 | 0.597518 |
4af08ca9147157b907bffd27adc2bdb19c5cdfe1 | 3,827 | py | Python | src/prefect/engine/__init__.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | src/prefect/engine/__init__.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | src/prefect/engine/__init__.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | from warnings import warn
from prefect import config
import prefect.engine.executors
import prefect.engine.state
import prefect.engine.signals
import prefect.engine.result
import prefect.engine.result_handlers
from prefect.engine.flow_runner import FlowRunner
from prefect.engine.task_runner import TaskRunner
import prefect.engine.cloud
def get_default_executor_class() -> type:
"""
Returns the `Executor` class specified in
`prefect.config.engine.executor.default_class`. If the value is a string, it will
attempt to load the already-imported object. Otherwise, the value is returned.
Defaults to `SynchronousExecutor` if the string config value can not be loaded
"""
config_value = config.get_nested("engine.executor.default_class")
if isinstance(config_value, str):
try:
return prefect.utilities.serialization.from_qualified_name(config_value)
except ValueError:
warn(
"Could not import {}; using "
"prefect.engine.executors.SynchronousExecutor instead.".format(
config_value
)
)
return prefect.engine.executors.SynchronousExecutor
else:
return config_value
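# Illustrative note (added for clarity, not part of the original module): with a
# user config along the lines of
#
#   [engine.executor]
#   default_class = "prefect.engine.executors.SynchronousExecutor"
#
# the string is resolved through `from_qualified_name` and the class object is
# returned, while a non-string value (e.g. a class set programmatically) is
# passed through unchanged. The loaders below follow the same pattern for the
# flow runner, task runner and result handler.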
def get_default_flow_runner_class() -> type:
"""
Returns the `FlowRunner` class specified in
`prefect.config.engine.flow_runner.default_class` If the value is a string, it will
attempt to load the already-imported object. Otherwise, the value is returned.
Defaults to `FlowRunner` if the string config value can not be loaded
"""
config_value = config.get_nested("engine.flow_runner.default_class")
if isinstance(config_value, str):
try:
return prefect.utilities.serialization.from_qualified_name(config_value)
except ValueError:
warn(
"Could not import {}; using "
"prefect.engine.flow_runner.FlowRunner instead.".format(config_value)
)
return prefect.engine.flow_runner.FlowRunner
else:
return config_value
def get_default_task_runner_class() -> type:
"""
Returns the `TaskRunner` class specified in `prefect.config.engine.task_runner.default_class` If the
value is a string, it will attempt to load the already-imported object. Otherwise, the
value is returned.
Defaults to `TaskRunner` if the string config value can not be loaded
"""
config_value = config.get_nested("engine.task_runner.default_class")
if isinstance(config_value, str):
try:
return prefect.utilities.serialization.from_qualified_name(config_value)
except ValueError:
warn(
"Could not import {}; using "
"prefect.engine.task_runner.TaskRunner instead.".format(config_value)
)
return prefect.engine.task_runner.TaskRunner
else:
return config_value
def get_default_result_handler_class() -> type:
"""
Returns the `ResultHandler` class specified in `prefect.config.engine.result_handler.default_class` If the
value is a string, it will attempt to load the already-imported object. Otherwise, the
value is returned.
Defaults to `None` if the string config value can not be loaded
"""
config_value = config.get_nested("engine.result_handler.default_class")
if isinstance(config_value, str):
if not config_value:
return lambda *args, **kwargs: None # type: ignore
try:
return prefect.utilities.serialization.from_qualified_name(config_value)
except ValueError:
warn("Could not import {}; using " "None instead.".format(config_value))
return lambda *args, **kwargs: None # type: ignore
else:
return config_value
| 37.15534 | 110 | 0.686438 |
dd092c5dc8b1449a7fbac1f6ed5820fefb4bab83 | 851 | py | Python | email_split/test/test.py | underdogio/python-email-split | a5fa8a657ee90db68740cfe5d028b8c92afae00d | [
"MIT"
] | 5 | 2016-02-04T01:37:51.000Z | 2019-01-28T12:11:47.000Z | email_split/test/test.py | underdogio/python-email-split | a5fa8a657ee90db68740cfe5d028b8c92afae00d | [
"MIT"
] | 2 | 2016-02-04T13:00:29.000Z | 2016-12-30T20:45:15.000Z | email_split/test/test.py | underdogio/python-email-split | a5fa8a657ee90db68740cfe5d028b8c92afae00d | [
"MIT"
] | 3 | 2019-12-12T16:30:27.000Z | 2022-01-19T08:36:19.000Z | # Load in our dependencies
from unittest import TestCase
from email_split import email_split
# Define our tests
class TestEmailSplitFunction(TestCase):
def test_top_level_domain(self):
"""
email-split splitting an email with a top-level domain
returns the local part
returns the domain part
"""
email = email_split('todd@underdog.io')
self.assertEqual(email.local, 'todd')
self.assertEqual(email.domain, 'underdog.io')
def test_subdomain(self):
"""
email-split splitting an email on a subdomain
returns the local part
returns the domain part (including subdomain)
"""
email = email_split('you@are.super.cool')
self.assertEqual(email.local, 'you')
self.assertEqual(email.domain, 'are.super.cool')
| 31.518519 | 62 | 0.643948 |
16fb78c0ef273b23bd6665eb29fe0e213baa47af | 16,713 | py | Python | ppdet/modeling/backbones/ghostnet.py | cristicmf/PaddleDetection | 818533bb299d49f114d36b60b1bff199d0231055 | [
"Apache-2.0"
] | 2 | 2021-07-06T09:09:12.000Z | 2021-07-08T08:06:40.000Z | ppdet/modeling/backbones/ghostnet.py | cristicmf/PaddleDetection | 818533bb299d49f114d36b60b1bff199d0231055 | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/backbones/ghostnet.py | cristicmf/PaddleDetection | 818533bb299d49f114d36b60b1bff199d0231055 | [
"Apache-2.0"
] | 3 | 2021-09-30T02:50:21.000Z | 2021-11-16T12:38:15.000Z | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, AdaptiveAvgPool2D, Linear
from paddle.regularizer import L2Decay
from paddle.nn.initializer import Uniform, KaimingNormal
from ppdet.core.workspace import register, serializable
from numbers import Integral
from ..shape_spec import ShapeSpec
from .mobilenet_v3 import make_divisible, ConvBNLayer
__all__ = ['GhostNet']
class ExtraBlockDW(nn.Layer):
def __init__(self,
in_c,
ch_1,
ch_2,
stride,
lr_mult,
conv_decay=0.,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
name=None):
super(ExtraBlockDW, self).__init__()
self.pointwise_conv = ConvBNLayer(
in_c=in_c,
out_c=ch_1,
filter_size=1,
stride=1,
padding=0,
act='relu6',
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_extra1")
self.depthwise_conv = ConvBNLayer(
in_c=ch_1,
out_c=ch_2,
filter_size=3,
stride=stride,
            padding=1,
num_groups=int(ch_1),
act='relu6',
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_extra2_dw")
self.normal_conv = ConvBNLayer(
in_c=ch_2,
out_c=ch_2,
filter_size=1,
stride=1,
padding=0,
act='relu6',
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_extra2_sep")
def forward(self, inputs):
x = self.pointwise_conv(inputs)
x = self.depthwise_conv(x)
x = self.normal_conv(x)
return x
class SEBlock(nn.Layer):
def __init__(self, num_channels, lr_mult, reduction_ratio=4, name=None):
super(SEBlock, self).__init__()
self.pool2d_gap = AdaptiveAvgPool2D(1)
self._num_channels = num_channels
stdv = 1.0 / math.sqrt(num_channels * 1.0)
med_ch = num_channels // reduction_ratio
self.squeeze = Linear(
num_channels,
med_ch,
weight_attr=ParamAttr(
learning_rate=lr_mult,
initializer=Uniform(-stdv, stdv),
name=name + "_1_weights"),
bias_attr=ParamAttr(
learning_rate=lr_mult, name=name + "_1_offset"))
stdv = 1.0 / math.sqrt(med_ch * 1.0)
self.excitation = Linear(
med_ch,
num_channels,
weight_attr=ParamAttr(
learning_rate=lr_mult,
initializer=Uniform(-stdv, stdv),
name=name + "_2_weights"),
bias_attr=ParamAttr(
learning_rate=lr_mult, name=name + "_2_offset"))
def forward(self, inputs):
pool = self.pool2d_gap(inputs)
pool = paddle.squeeze(pool, axis=[2, 3])
squeeze = self.squeeze(pool)
squeeze = F.relu(squeeze)
excitation = self.excitation(squeeze)
excitation = paddle.clip(x=excitation, min=0, max=1)
excitation = paddle.unsqueeze(excitation, axis=[2, 3])
out = paddle.multiply(inputs, excitation)
return out
class GhostModule(nn.Layer):
def __init__(self,
in_channels,
output_channels,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
relu=True,
lr_mult=1.,
conv_decay=0.,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
name=None):
super(GhostModule, self).__init__()
init_channels = int(math.ceil(output_channels / ratio))
new_channels = int(init_channels * (ratio - 1))
self.primary_conv = ConvBNLayer(
in_c=in_channels,
out_c=init_channels,
filter_size=kernel_size,
stride=stride,
padding=int((kernel_size - 1) // 2),
num_groups=1,
act="relu" if relu else None,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_primary_conv")
self.cheap_operation = ConvBNLayer(
in_c=init_channels,
out_c=new_channels,
filter_size=dw_size,
stride=1,
padding=int((dw_size - 1) // 2),
num_groups=init_channels,
act="relu" if relu else None,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_cheap_operation")
def forward(self, inputs):
x = self.primary_conv(inputs)
y = self.cheap_operation(x)
out = paddle.concat([x, y], axis=1)
return out
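# Worked example (illustrative, not part of the original source): for
# output_channels=64 and ratio=2, init_channels = ceil(64 / 2) = 32 and
# new_channels = 32 * (2 - 1) = 32, so the primary 1x1 conv produces 32
# channels, the cheap depthwise conv derives another 32 from them, and the
# concat restores the requested 64 output channels at a fraction of the cost
# of a single full convolution.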
class GhostBottleneck(nn.Layer):
def __init__(self,
in_channels,
hidden_dim,
output_channels,
kernel_size,
stride,
use_se,
lr_mult,
conv_decay=0.,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
return_list=False,
name=None):
super(GhostBottleneck, self).__init__()
self._stride = stride
self._use_se = use_se
self._num_channels = in_channels
self._output_channels = output_channels
self.return_list = return_list
self.ghost_module_1 = GhostModule(
in_channels=in_channels,
output_channels=hidden_dim,
kernel_size=1,
stride=1,
relu=True,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_ghost_module_1")
if stride == 2:
self.depthwise_conv = ConvBNLayer(
in_c=hidden_dim,
out_c=hidden_dim,
filter_size=kernel_size,
stride=stride,
padding=int((kernel_size - 1) // 2),
num_groups=hidden_dim,
act=None,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name +
"_depthwise_depthwise" # looks strange due to an old typo, will be fixed later.
)
if use_se:
self.se_block = SEBlock(hidden_dim, lr_mult, name=name + "_se")
self.ghost_module_2 = GhostModule(
in_channels=hidden_dim,
output_channels=output_channels,
kernel_size=1,
relu=False,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_ghost_module_2")
if stride != 1 or in_channels != output_channels:
self.shortcut_depthwise = ConvBNLayer(
in_c=in_channels,
out_c=in_channels,
filter_size=kernel_size,
stride=stride,
padding=int((kernel_size - 1) // 2),
num_groups=in_channels,
act=None,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name +
"_shortcut_depthwise_depthwise" # looks strange due to an old typo, will be fixed later.
)
self.shortcut_conv = ConvBNLayer(
in_c=in_channels,
out_c=output_channels,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
act=None,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + "_shortcut_conv")
def forward(self, inputs):
y = self.ghost_module_1(inputs)
x = y
if self._stride == 2:
x = self.depthwise_conv(x)
if self._use_se:
x = self.se_block(x)
x = self.ghost_module_2(x)
if self._stride == 1 and self._num_channels == self._output_channels:
shortcut = inputs
else:
shortcut = self.shortcut_depthwise(inputs)
shortcut = self.shortcut_conv(shortcut)
x = paddle.add(x=x, y=shortcut)
if self.return_list:
return [y, x]
else:
return x
@register
@serializable
class GhostNet(nn.Layer):
__shared__ = ['norm_type']
def __init__(
self,
scale=1.3,
feature_maps=[6, 12, 15],
with_extra_blocks=False,
extra_block_filters=[[256, 512], [128, 256], [128, 256], [64, 128]],
lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0],
conv_decay=0.,
norm_type='bn',
norm_decay=0.0,
freeze_norm=False):
super(GhostNet, self).__init__()
if isinstance(feature_maps, Integral):
feature_maps = [feature_maps]
if norm_type == 'sync_bn' and freeze_norm:
raise ValueError(
"The norm_type should not be sync_bn when freeze_norm is True")
self.feature_maps = feature_maps
self.with_extra_blocks = with_extra_blocks
self.extra_block_filters = extra_block_filters
inplanes = 16
self.cfgs = [
# k, t, c, SE, s
[3, 16, 16, 0, 1],
[3, 48, 24, 0, 2],
[3, 72, 24, 0, 1],
[5, 72, 40, 1, 2],
[5, 120, 40, 1, 1],
[3, 240, 80, 0, 2],
[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 1, 1],
[3, 672, 112, 1, 1],
[5, 672, 160, 1, 2], # SSDLite output
[5, 960, 160, 0, 1],
[5, 960, 160, 1, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 1, 1]
]
self.scale = scale
conv1_out_ch = int(make_divisible(inplanes * self.scale, 4))
self.conv1 = ConvBNLayer(
in_c=3,
out_c=conv1_out_ch,
filter_size=3,
stride=2,
padding=1,
num_groups=1,
act="relu",
lr_mult=1.,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="conv1")
# build inverted residual blocks
self._out_channels = []
self.ghost_bottleneck_list = []
idx = 0
inplanes = conv1_out_ch
for k, exp_size, c, use_se, s in self.cfgs:
lr_idx = min(idx // 3, len(lr_mult_list) - 1)
lr_mult = lr_mult_list[lr_idx]
# for SSD/SSDLite, first head input is after ResidualUnit expand_conv
return_list = self.with_extra_blocks and idx + 2 in self.feature_maps
ghost_bottleneck = self.add_sublayer(
"_ghostbottleneck_" + str(idx),
sublayer=GhostBottleneck(
in_channels=inplanes,
hidden_dim=int(make_divisible(exp_size * self.scale, 4)),
output_channels=int(make_divisible(c * self.scale, 4)),
kernel_size=k,
stride=s,
use_se=use_se,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
return_list=return_list,
name="_ghostbottleneck_" + str(idx)))
self.ghost_bottleneck_list.append(ghost_bottleneck)
inplanes = int(make_divisible(c * self.scale, 4))
idx += 1
self._update_out_channels(
int(make_divisible(exp_size * self.scale, 4))
if return_list else inplanes, idx + 1, feature_maps)
if self.with_extra_blocks:
self.extra_block_list = []
extra_out_c = int(make_divisible(self.scale * self.cfgs[-1][1], 4))
lr_idx = min(idx // 3, len(lr_mult_list) - 1)
lr_mult = lr_mult_list[lr_idx]
conv_extra = self.add_sublayer(
"conv" + str(idx + 2),
sublayer=ConvBNLayer(
in_c=inplanes,
out_c=extra_out_c,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
act="relu6",
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="conv" + str(idx + 2)))
self.extra_block_list.append(conv_extra)
idx += 1
self._update_out_channels(extra_out_c, idx + 1, feature_maps)
for j, block_filter in enumerate(self.extra_block_filters):
in_c = extra_out_c if j == 0 else self.extra_block_filters[j -
1][1]
conv_extra = self.add_sublayer(
"conv" + str(idx + 2),
sublayer=ExtraBlockDW(
in_c,
block_filter[0],
block_filter[1],
stride=2,
lr_mult=lr_mult,
conv_decay=conv_decay,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name='conv' + str(idx + 2)))
self.extra_block_list.append(conv_extra)
idx += 1
self._update_out_channels(block_filter[1], idx + 1,
feature_maps)
def _update_out_channels(self, channel, feature_idx, feature_maps):
if feature_idx in feature_maps:
self._out_channels.append(channel)
def forward(self, inputs):
x = self.conv1(inputs['image'])
outs = []
for idx, ghost_bottleneck in enumerate(self.ghost_bottleneck_list):
x = ghost_bottleneck(x)
if idx + 2 in self.feature_maps:
if isinstance(x, list):
outs.append(x[0])
x = x[1]
else:
outs.append(x)
if not self.with_extra_blocks:
return outs
for i, block in enumerate(self.extra_block_list):
idx = i + len(self.ghost_bottleneck_list)
x = block(x)
if idx + 2 in self.feature_maps:
outs.append(x)
return outs
@property
def out_shape(self):
return [ShapeSpec(channels=c) for c in self._out_channels]
| 35.037736 | 105 | 0.520074 |
dedd5492d697c0bfb2a1d4e8a1d0cc2649cea10a | 1,015 | py | Python | setup.py | wazizian/torch_spspmm_out | b97297cec7263ec9a34e4230d867a59bf01b3a4b | [
"MIT"
] | null | null | null | setup.py | wazizian/torch_spspmm_out | b97297cec7263ec9a34e4230d867a59bf01b3a4b | [
"MIT"
] | null | null | null | setup.py | wazizian/torch_spspmm_out | b97297cec7263ec9a34e4230d867a59bf01b3a4b | [
"MIT"
] | null | null | null | from setuptools import setup
import os
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
cuda_support = torch.cuda.is_available()
def get_extensions():
extra_link_args = []
extension_dir = os.path.join(os.path.dirname(__file__), "csrc")
if cuda_support:
Extension = CUDAExtension
extra_link_args += ["-lcusparse"]
else:
Extension = CppExtension
extension = Extension(name="torch_spspmm_out._spspmm_out",
sources=[
"csrc/spspmm_out.cpp",
],
include_dirs=[extension_dir],
extra_link_args=extra_link_args,
)
return [extension]
setup(
name="torch_spspmm_out",
install_requires=["torch"],
ext_modules=get_extensions(),
cmdclass={"build_ext": BuildExtension.with_options(use_ninja=False, no_python_abi_suffix=True)}
)
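# Note (added for clarity): the extension type is chosen at build time from the
# environment running setup.py -- when torch.cuda.is_available() reports a CUDA
# build, a CUDAExtension linked against cuSPARSE (-lcusparse) is compiled,
# otherwise a plain CppExtension is built. For example, a hypothetical install
# would simply be `pip install .` run inside the desired torch environment.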
| 29.852941 | 103 | 0.6 |
d262781e2f1ae6d216bcc38a963b29a8b3531299 | 631 | py | Python | ArubaOS-Sw_API_Scripts/python_and_rest_api_vid_scripts/get_cookie_create_vlan.py | smallfount/scriptsonly | cabdfc301da4e1653705d713b306f3fbf7f6934d | [
"Apache-2.0"
] | 32 | 2016-05-24T23:32:02.000Z | 2021-11-17T07:53:50.000Z | ArubaOS-Sw_API_Scripts/python_and_rest_api_vid_scripts/get_cookie_create_vlan.py | posai8701/scriptsonly | cabdfc301da4e1653705d713b306f3fbf7f6934d | [
"Apache-2.0"
] | 5 | 2016-09-25T15:55:02.000Z | 2018-09-06T10:54:45.000Z | ArubaOS-Sw_API_Scripts/python_and_rest_api_vid_scripts/get_cookie_create_vlan.py | posai8701/scriptsonly | cabdfc301da4e1653705d713b306f3fbf7f6934d | [
"Apache-2.0"
] | 34 | 2016-03-02T17:37:07.000Z | 2021-11-17T07:54:04.000Z | import requests
vlan_number = input('Enter VLAN number:')
vlan_name = input('Enter VLAN name:')
url_login = "http://192.168.1.29/rest/v1/login-sessions"
url_vlans = "http://192.168.1.29/rest/v1/vlans"
payload_login = "{\"userName\": \"joe\", \"password\": \"x\"}"
get_cookie = requests.request("POST", url_login, data=payload_login)
r_cookie = get_cookie.json()['cookie']
print(r_cookie)
payload_vlan = "{\"vlan_id\":"+vlan_number+",\"name\":\""+vlan_name+"\"}"
print(payload_vlan)
headers = {'cookie': r_cookie }
config_vlan = requests.request("POST", url_vlans, data=payload_vlan, headers=headers)
print(config_vlan)
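# Workflow summary (comments added for clarity; endpoint paths and payload
# shapes are taken from the calls above, exact response fields may vary by
# ArubaOS-Switch firmware version):
#   1. POST /rest/v1/login-sessions with the userName/password JSON; the
#      response body carries a "cookie" value, e.g. {"cookie": "sessionId=..."}.
#   2. That cookie string is sent back in the "cookie" header of later calls.
#   3. POST /rest/v1/vlans with {"vlan_id": <number>, "name": "<name>"} creates
#      the VLAN; config_vlan holds the Response object (typically 201 Created).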
| 23.37037 | 85 | 0.698891 |
21a75d48ee5d30d0bbc8bd951cda3ac93a14af4b | 4,576 | py | Python | tests/bhive/test_block.py | TheCrazyGM/bhive | 1494e90a99123ecfc5efbd927258f9ba59443e2e | [
"MIT"
] | 2 | 2020-03-21T23:50:22.000Z | 2020-03-25T19:10:48.000Z | tests/bhive/test_block.py | TheCrazyGM/bhive | 1494e90a99123ecfc5efbd927258f9ba59443e2e | [
"MIT"
] | null | null | null | tests/bhive/test_block.py | TheCrazyGM/bhive | 1494e90a99123ecfc5efbd927258f9ba59443e2e | [
"MIT"
] | 1 | 2020-03-21T23:50:25.000Z | 2020-03-21T23:50:25.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from pprint import pprint
from bhive import Hive, exceptions
from bhive.block import Block, BlockHeader
from datetime import datetime
from bhive.instance import set_shared_hive_instance
from bhive.nodelist import NodeList
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
nodelist = NodeList()
nodelist.update_nodes(hive_instance=Hive(node=nodelist.get_nodes(exclude_limited=False), num_retries=10))
cls.bts = Hive(
node=nodelist.get_nodes(exclude_limited=True),
nobroadcast=True,
keys={"active": wif},
num_retries=10
)
cls.test_block_id = 19273700
# from getpass import getpass
# self.bts.wallet.unlock(getpass())
set_shared_hive_instance(cls.bts)
cls.bts.set_default_account("test")
def test_block(self):
bts = self.bts
test_block_id = self.test_block_id
block = Block(test_block_id, hive_instance=bts)
self.assertEqual(block.identifier, test_block_id)
self.assertTrue(isinstance(block.time(), datetime))
self.assertTrue(isinstance(block, dict))
self.assertTrue(len(block.operations))
self.assertTrue(isinstance(block.ops_statistics(), dict))
block2 = Block(test_block_id + 1, hive_instance=bts)
self.assertTrue(block2.time() > block.time())
with self.assertRaises(
exceptions.BlockDoesNotExistsException
):
Block(0, hive_instance=bts)
def test_block_only_ops(self):
bts = self.bts
test_block_id = self.test_block_id
block = Block(test_block_id, only_ops=True, hive_instance=bts)
self.assertEqual(block.identifier, test_block_id)
self.assertTrue(isinstance(block.time(), datetime))
self.assertTrue(isinstance(block, dict))
self.assertTrue(len(block.operations))
self.assertTrue(isinstance(block.ops_statistics(), dict))
block2 = Block(test_block_id + 1, hive_instance=bts)
self.assertTrue(block2.time() > block.time())
with self.assertRaises(
exceptions.BlockDoesNotExistsException
):
Block(0, hive_instance=bts)
def test_block_header(self):
bts = self.bts
test_block_id = self.test_block_id
block = BlockHeader(test_block_id, hive_instance=bts)
self.assertEqual(block.identifier, test_block_id)
self.assertTrue(isinstance(block.time(), datetime))
self.assertTrue(isinstance(block, dict))
block2 = BlockHeader(test_block_id + 1, hive_instance=bts)
self.assertTrue(block2.time() > block.time())
with self.assertRaises(
exceptions.BlockDoesNotExistsException
):
BlockHeader(0, hive_instance=bts)
def test_export(self):
bts = self.bts
block_num = 2000000
if bts.rpc.get_use_appbase():
block = bts.rpc.get_block({"block_num": block_num}, api="block")
if block and "block" in block:
block = block["block"]
else:
block = bts.rpc.get_block(block_num)
b = Block(block_num, hive_instance=bts)
keys = list(block.keys())
json_content = b.json()
for k in keys:
if k not in "json_metadata":
if isinstance(block[k], dict) and isinstance(json_content[k], list):
self.assertEqual(list(block[k].values()), json_content[k])
else:
self.assertEqual(block[k], json_content[k])
if bts.rpc.get_use_appbase():
block = bts.rpc.get_block_header({"block_num": block_num}, api="block")
if "header" in block:
block = block["header"]
else:
block = bts.rpc.get_block_header(block_num)
b = BlockHeader(block_num, hive_instance=bts)
keys = list(block.keys())
json_content = b.json()
for k in keys:
if k not in "json_metadata":
if isinstance(block[k], dict) and isinstance(json_content[k], list):
self.assertEqual(list(block[k].values()), json_content[k])
else:
self.assertEqual(block[k], json_content[k])
| 36.608 | 113 | 0.641827 |
6e1fffc0dc7165bf31e251806efb26ae83f9d194 | 3,017 | py | Python | UMLRT2Kiltera_MM/graph_MT_post__Trigger_S.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | UMLRT2Kiltera_MM/graph_MT_post__Trigger_S.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | UMLRT2Kiltera_MM/graph_MT_post__Trigger_S.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | """
__graph_MT_post__Trigger_S.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
_____________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__Trigger_S(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([30.0, 31.0, 30.0, 12.0])[:2], tags = self.tag, font=font, fill = 'grey45', anchor = 'center', text = '', width = '0', justify= 'left', stipple='' )
self.gf7 = GraphicalForm(drawing, h, 'gf7', fontObject=font)
self.graphForms.append(self.gf7)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([100.0, 40.0, 100.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'Element', width = '0', justify= 'left', stipple='' )
self.gf8 = GraphicalForm(drawing, h, 'gf8', fontObject=font)
self.graphForms.append(self.gf8)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__Trigger_S
| 43.724638 | 203 | 0.639045 |
cb896d37c24a13ca79091ec139a8c83b9a297798 | 6,337 | py | Python | test/rpc_spentindex.py | odavila466/Kron-Project | 8a915e6287ac6d21ac0a32ff69f6f04e260bd1f5 | [
"MIT"
] | 3 | 2021-05-18T05:11:56.000Z | 2021-12-05T11:25:38.000Z | test/rpc_spentindex.py | BaymaxValero/Kron-Project | e56e596ee36e4b6949ebb75a01867c08481139e2 | [
"MIT"
] | 1 | 2021-05-13T19:01:05.000Z | 2021-05-13T19:01:57.000Z | test/rpc_spentindex.py | BaymaxValero/Kron-Project | e56e596ee36e4b6949ebb75a01867c08481139e2 | [
"MIT"
] | 1 | 2021-05-18T05:11:58.000Z | 2021-05-18T05:11:58.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Kron Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC addressindex generation and fetching"""
import binascii
from test_framework.test_framework import KronTestFramework
from test_framework.util import connect_nodes_bi, assert_equal
from test_framework.script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG
from test_framework.mininode import CTransaction, CTxIn, COutPoint, CTxOut
class SpentIndexTest(KronTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.add_nodes(4, [
# Nodes 0/1 are "wallet" nodes
[],
["-spentindex"],
# Nodes 2/3 are used for testing
["-spentindex"],
["-spentindex", "-txindex"]])
self.start_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
self.sync_all()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
        # Check that getspentinfo reports the txid, index and height at which an output was spent
self.log.info("Testing spent index...")
fee_satoshis = 192000
privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
#address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
address_hash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
script_pub_key = CScript([OP_DUP, OP_HASH160, address_hash, OP_EQUALVERIFY, OP_CHECKSIG])
unspent = self.nodes[0].listunspent()
tx = CTransaction()
amount = int(unspent[0]["amount"] * 100000000 - fee_satoshis)
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(amount, script_pub_key)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
self.log.info("Testing getspentinfo method...")
# Check that the spentinfo works standalone
info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
assert_equal(info["txid"], txid)
assert_equal(info["index"], 0)
assert_equal(info["height"], 106)
self.log.info("Testing getrawtransaction method...")
# Check that verbose raw transaction includes spent info
tx_verbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
assert_equal(tx_verbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
assert_equal(tx_verbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
assert_equal(tx_verbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
# Check that verbose raw transaction includes input values
tx_verbose2 = self.nodes[3].getrawtransaction(txid, 1)
assert_equal(float(tx_verbose2["vin"][0]["value"]), (amount + fee_satoshis) / 100000000)
assert_equal(tx_verbose2["vin"][0]["valueSat"], amount + fee_satoshis)
# Check that verbose raw transaction includes address values and input values
#privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
address_hash2 = bytes([11, 47, 10, 12, 49, 191, 224, 64, 107, 12, 204, 19, 129, 253, 190, 49, 25, 70, 218, 220])
script_pub_key2 = CScript([OP_DUP, OP_HASH160, address_hash2, OP_EQUALVERIFY, OP_CHECKSIG])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
amount = int(amount - fee_satoshis)
tx2.vout = [CTxOut(amount, script_pub_key2)]
tx.rehash()
self.nodes[0].importprivkey(privkey)
signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
# Check the mempool index
self.sync_all()
tx_verbose3 = self.nodes[1].getrawtransaction(txid2, 1)
assert_equal(tx_verbose3["vin"][0]["address"], address2)
assert_equal(tx_verbose3["vin"][0]["valueSat"], amount + fee_satoshis)
assert_equal(float(tx_verbose3["vin"][0]["value"]), (amount + fee_satoshis) / 100000000)
# Check the database index
block_hash = self.nodes[0].generate(1)
self.sync_all()
tx_verbose4 = self.nodes[3].getrawtransaction(txid2, 1)
assert_equal(tx_verbose4["vin"][0]["address"], address2)
assert_equal(tx_verbose4["vin"][0]["valueSat"], amount + fee_satoshis)
assert_equal(float(tx_verbose4["vin"][0]["value"]), (amount + fee_satoshis) / 100000000)
# Check block deltas
self.log.info("Testing getblockdeltas...")
block = self.nodes[3].getblockdeltas(block_hash[0])
assert_equal(len(block["deltas"]), 2)
assert_equal(block["deltas"][0]["index"], 0)
assert_equal(len(block["deltas"][0]["inputs"]), 0)
assert_equal(len(block["deltas"][0]["outputs"]), 0)
assert_equal(block["deltas"][1]["index"], 1)
assert_equal(block["deltas"][1]["txid"], txid2)
assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], (amount + fee_satoshis) * -1)
assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["outputs"][0]["satoshis"], amount)
self.log.info("All Tests Passed")
if __name__ == '__main__':
SpentIndexTest().main()
| 44.626761 | 120 | 0.648887 |
7d4b1b7ef34660509d17409582e068d17d33c3a0 | 20,598 | py | Python | accounts/migrations/0002_auto_20211112_0737.py | shakori999/Django_CRM | 82789878b679e68e993295fde0040b16a1c56767 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0002_auto_20211112_0737.py | shakori999/Django_CRM | 82789878b679e68e993295fde0040b16a1c56767 | [
"Apache-2.0"
] | 2 | 2022-03-21T08:48:46.000Z | 2022-03-21T08:49:57.000Z | accounts/migrations/0002_auto_20211112_0737.py | shakori999/Django_CRM | 82789878b679e68e993295fde0040b16a1c56767 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-12 04:37
from django.db import migrations, models
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customer',
name='wallet_currency',
field=djmoney.models.fields.CurrencyField(choices=[('XUA', 'ADB Unit of Account'), ('AFN', 'Afghan Afghani'), ('AFA', 'Afghan Afghani (1927–2002)'), ('ALL', 'Albanian Lek'), ('ALK', 'Albanian Lek (1946–1965)'), ('DZD', 'Algerian Dinar'), ('ADP', 'Andorran Peseta'), ('AOA', 'Angolan Kwanza'), ('AOK', 'Angolan Kwanza (1977–1991)'), ('AON', 'Angolan New Kwanza (1990–2000)'), ('AOR', 'Angolan Readjusted Kwanza (1995–1999)'), ('ARA', 'Argentine Austral'), ('ARS', 'Argentine Peso'), ('ARM', 'Argentine Peso (1881–1970)'), ('ARP', 'Argentine Peso (1983–1985)'), ('ARL', 'Argentine Peso Ley (1970–1983)'), ('AMD', 'Armenian Dram'), ('AWG', 'Aruban Florin'), ('AUD', 'Australian Dollar'), ('ATS', 'Austrian Schilling'), ('AZN', 'Azerbaijani Manat'), ('AZM', 'Azerbaijani Manat (1993–2006)'), ('BSD', 'Bahamian Dollar'), ('BHD', 'Bahraini Dinar'), ('BDT', 'Bangladeshi Taka'), ('BBD', 'Barbadian Dollar'), ('BYN', 'Belarusian Ruble'), ('BYB', 'Belarusian Ruble (1994–1999)'), ('BYR', 'Belarusian Ruble (2000–2016)'), ('BEF', 'Belgian Franc'), ('BEC', 'Belgian Franc (convertible)'), ('BEL', 'Belgian Franc (financial)'), ('BZD', 'Belize Dollar'), ('BMD', 'Bermudan Dollar'), ('BTN', 'Bhutanese Ngultrum'), ('BOB', 'Bolivian Boliviano'), ('BOL', 'Bolivian Boliviano (1863–1963)'), ('BOV', 'Bolivian Mvdol'), ('BOP', 'Bolivian Peso'), ('BAM', 'Bosnia-Herzegovina Convertible Mark'), ('BAD', 'Bosnia-Herzegovina Dinar (1992–1994)'), ('BAN', 'Bosnia-Herzegovina New Dinar (1994–1997)'), ('BWP', 'Botswanan Pula'), ('BRC', 'Brazilian Cruzado (1986–1989)'), ('BRZ', 'Brazilian Cruzeiro (1942–1967)'), ('BRE', 'Brazilian Cruzeiro (1990–1993)'), ('BRR', 'Brazilian Cruzeiro (1993–1994)'), ('BRN', 'Brazilian New Cruzado (1989–1990)'), ('BRB', 'Brazilian New Cruzeiro (1967–1986)'), ('BRL', 'Brazilian Real'), ('GBP', 'British Pound'), ('BND', 'Brunei Dollar'), ('BGL', 'Bulgarian Hard Lev'), ('BGN', 'Bulgarian Lev'), ('BGO', 'Bulgarian Lev (1879–1952)'), ('BGM', 'Bulgarian Socialist Lev'), ('BUK', 'Burmese Kyat'), ('BIF', 'Burundian Franc'), ('XPF', 'CFP Franc'), ('KHR', 'Cambodian Riel'), ('CAD', 'Canadian Dollar'), ('CVE', 'Cape Verdean Escudo'), ('KYD', 'Cayman Islands Dollar'), ('XAF', 'Central African CFA Franc'), ('CLE', 'Chilean Escudo'), ('CLP', 'Chilean Peso'), ('CLF', 'Chilean Unit of Account (UF)'), ('CNX', 'Chinese People’s Bank Dollar'), ('CNY', 'Chinese Yuan'), ('CNH', 'Chinese Yuan (offshore)'), ('COP', 'Colombian Peso'), ('COU', 'Colombian Real Value Unit'), ('KMF', 'Comorian Franc'), ('CDF', 'Congolese Franc'), ('CRC', 'Costa Rican Colón'), ('HRD', 'Croatian Dinar'), ('HRK', 'Croatian Kuna'), ('CUC', 'Cuban Convertible Peso'), ('CUP', 'Cuban Peso'), ('CYP', 'Cypriot Pound'), ('CZK', 'Czech Koruna'), ('CSK', 'Czechoslovak Hard Koruna'), ('DKK', 'Danish Krone'), ('DJF', 'Djiboutian Franc'), ('DOP', 'Dominican Peso'), ('NLG', 'Dutch Guilder'), ('XCD', 'East Caribbean Dollar'), ('DDM', 'East German Mark'), ('ECS', 'Ecuadorian Sucre'), ('ECV', 'Ecuadorian Unit of Constant Value'), ('EGP', 'Egyptian Pound'), ('GQE', 'Equatorial Guinean Ekwele'), ('ERN', 'Eritrean Nakfa'), ('EEK', 'Estonian Kroon'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('XBA', 'European Composite Unit'), ('XEU', 'European Currency Unit'), ('XBB', 'European Monetary Unit'), ('XBC', 'European Unit of Account (XBC)'), ('XBD', 'European Unit of Account (XBD)'), ('FKP', 'Falkland Islands Pound'), ('FJD', 'Fijian Dollar'), ('FIM', 'Finnish Markka'), ('FRF', 'French Franc'), ('XFO', 'French Gold Franc'), ('XFU', 'French UIC-Franc'), ('GMD', 'Gambian 
Dalasi'), ('GEK', 'Georgian Kupon Larit'), ('GEL', 'Georgian Lari'), ('DEM', 'German Mark'), ('GHS', 'Ghanaian Cedi'), ('GHC', 'Ghanaian Cedi (1979–2007)'), ('GIP', 'Gibraltar Pound'), ('XAU', 'Gold'), ('GRD', 'Greek Drachma'), ('GTQ', 'Guatemalan Quetzal'), ('GWP', 'Guinea-Bissau Peso'), ('GNF', 'Guinean Franc'), ('GNS', 'Guinean Syli'), ('GYD', 'Guyanaese Dollar'), ('HTG', 'Haitian Gourde'), ('HNL', 'Honduran Lempira'), ('HKD', 'Hong Kong Dollar'), ('HUF', 'Hungarian Forint'), ('IMP', 'IMP'), ('ISK', 'Icelandic Króna'), ('ISJ', 'Icelandic Króna (1918–1981)'), ('INR', 'Indian Rupee'), ('IDR', 'Indonesian Rupiah'), ('IRR', 'Iranian Rial'), ('IQD', 'Iraqi Dinar'), ('IEP', 'Irish Pound'), ('ILS', 'Israeli New Shekel'), ('ILP', 'Israeli Pound'), ('ILR', 'Israeli Shekel (1980–1985)'), ('ITL', 'Italian Lira'), ('JMD', 'Jamaican Dollar'), ('JPY', 'Japanese Yen'), ('JOD', 'Jordanian Dinar'), ('KZT', 'Kazakhstani Tenge'), ('KES', 'Kenyan Shilling'), ('KWD', 'Kuwaiti Dinar'), ('KGS', 'Kyrgystani Som'), ('LAK', 'Laotian Kip'), ('LVL', 'Latvian Lats'), ('LVR', 'Latvian Ruble'), ('LBP', 'Lebanese Pound'), ('LSL', 'Lesotho Loti'), ('LRD', 'Liberian Dollar'), ('LYD', 'Libyan Dinar'), ('LTL', 'Lithuanian Litas'), ('LTT', 'Lithuanian Talonas'), ('LUL', 'Luxembourg Financial Franc'), ('LUC', 'Luxembourgian Convertible Franc'), ('LUF', 'Luxembourgian Franc'), ('MOP', 'Macanese Pataca'), ('MKD', 'Macedonian Denar'), ('MKN', 'Macedonian Denar (1992–1993)'), ('MGA', 'Malagasy Ariary'), ('MGF', 'Malagasy Franc'), ('MWK', 'Malawian Kwacha'), ('MYR', 'Malaysian Ringgit'), ('MVR', 'Maldivian Rufiyaa'), ('MVP', 'Maldivian Rupee (1947–1981)'), ('MLF', 'Malian Franc'), ('MTL', 'Maltese Lira'), ('MTP', 'Maltese Pound'), ('MRU', 'Mauritanian Ouguiya'), ('MRO', 'Mauritanian Ouguiya (1973–2017)'), ('MUR', 'Mauritian Rupee'), ('MXV', 'Mexican Investment Unit'), ('MXN', 'Mexican Peso'), ('MXP', 'Mexican Silver Peso (1861–1992)'), ('MDC', 'Moldovan Cupon'), ('MDL', 'Moldovan Leu'), ('MCF', 'Monegasque Franc'), ('MNT', 'Mongolian Tugrik'), ('MAD', 'Moroccan Dirham'), ('MAF', 'Moroccan Franc'), ('MZE', 'Mozambican Escudo'), ('MZN', 'Mozambican Metical'), ('MZM', 'Mozambican Metical (1980–2006)'), ('MMK', 'Myanmar Kyat'), ('NAD', 'Namibian Dollar'), ('NPR', 'Nepalese Rupee'), ('ANG', 'Netherlands Antillean Guilder'), ('TWD', 'New Taiwan Dollar'), ('NZD', 'New Zealand Dollar'), ('NIO', 'Nicaraguan Córdoba'), ('NIC', 'Nicaraguan Córdoba (1988–1991)'), ('NGN', 'Nigerian Naira'), ('KPW', 'North Korean Won'), ('NOK', 'Norwegian Krone'), ('OMR', 'Omani Rial'), ('PKR', 'Pakistani Rupee'), ('XPD', 'Palladium'), ('PAB', 'Panamanian Balboa'), ('PGK', 'Papua New Guinean Kina'), ('PYG', 'Paraguayan Guarani'), ('PEI', 'Peruvian Inti'), ('PEN', 'Peruvian Sol'), ('PES', 'Peruvian Sol (1863–1965)'), ('PHP', 'Philippine Piso'), ('XPT', 'Platinum'), ('PLN', 'Polish Zloty'), ('PLZ', 'Polish Zloty (1950–1995)'), ('PTE', 'Portuguese Escudo'), ('GWE', 'Portuguese Guinea Escudo'), ('QAR', 'Qatari Rial'), ('XRE', 'RINET Funds'), ('RHD', 'Rhodesian Dollar'), ('RON', 'Romanian Leu'), ('ROL', 'Romanian Leu (1952–2006)'), ('RUB', 'Russian Ruble'), ('RUR', 'Russian Ruble (1991–1998)'), ('RWF', 'Rwandan Franc'), ('SVC', 'Salvadoran Colón'), ('WST', 'Samoan Tala'), ('SAR', 'Saudi Riyal'), ('RSD', 'Serbian Dinar'), ('CSD', 'Serbian Dinar (2002–2006)'), ('SCR', 'Seychellois Rupee'), ('SLL', 'Sierra Leonean Leone'), ('XAG', 'Silver'), ('SGD', 'Singapore Dollar'), ('SKK', 'Slovak Koruna'), ('SIT', 'Slovenian Tolar'), ('SBD', 'Solomon Islands Dollar'), ('SOS', 
'Somali Shilling'), ('ZAR', 'South African Rand'), ('ZAL', 'South African Rand (financial)'), ('KRH', 'South Korean Hwan (1953–1962)'), ('KRW', 'South Korean Won'), ('KRO', 'South Korean Won (1945–1953)'), ('SSP', 'South Sudanese Pound'), ('SUR', 'Soviet Rouble'), ('ESP', 'Spanish Peseta'), ('ESA', 'Spanish Peseta (A account)'), ('ESB', 'Spanish Peseta (convertible account)'), ('XDR', 'Special Drawing Rights'), ('LKR', 'Sri Lankan Rupee'), ('SHP', 'St. Helena Pound'), ('XSU', 'Sucre'), ('SDD', 'Sudanese Dinar (1992–2007)'), ('SDG', 'Sudanese Pound'), ('SDP', 'Sudanese Pound (1957–1998)'), ('SRD', 'Surinamese Dollar'), ('SRG', 'Surinamese Guilder'), ('SZL', 'Swazi Lilangeni'), ('SEK', 'Swedish Krona'), ('CHF', 'Swiss Franc'), ('SYP', 'Syrian Pound'), ('STN', 'São Tomé & Príncipe Dobra'), ('STD', 'São Tomé & Príncipe Dobra (1977–2017)'), ('TVD', 'TVD'), ('TJR', 'Tajikistani Ruble'), ('TJS', 'Tajikistani Somoni'), ('TZS', 'Tanzanian Shilling'), ('XTS', 'Testing Currency Code'), ('THB', 'Thai Baht'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('TPE', 'Timorese Escudo'), ('TOP', 'Tongan Paʻanga'), ('TTD', 'Trinidad & Tobago Dollar'), ('TND', 'Tunisian Dinar'), ('TRY', 'Turkish Lira'), ('TRL', 'Turkish Lira (1922–2005)'), ('TMT', 'Turkmenistani Manat'), ('TMM', 'Turkmenistani Manat (1993–2009)'), ('USD', 'US Dollar'), ('USN', 'US Dollar (Next day)'), ('USS', 'US Dollar (Same day)'), ('UGX', 'Ugandan Shilling'), ('UGS', 'Ugandan Shilling (1966–1987)'), ('UAH', 'Ukrainian Hryvnia'), ('UAK', 'Ukrainian Karbovanets'), ('AED', 'United Arab Emirates Dirham'), ('UYW', 'Uruguayan Nominal Wage Index Unit'), ('UYU', 'Uruguayan Peso'), ('UYP', 'Uruguayan Peso (1975–1993)'), ('UYI', 'Uruguayan Peso (Indexed Units)'), ('UZS', 'Uzbekistani Som'), ('VUV', 'Vanuatu Vatu'), ('VES', 'Venezuelan Bolívar'), ('VEB', 'Venezuelan Bolívar (1871–2008)'), ('VEF', 'Venezuelan Bolívar (2008–2018)'), ('VND', 'Vietnamese Dong'), ('VNN', 'Vietnamese Dong (1978–1985)'), ('CHE', 'WIR Euro'), ('CHW', 'WIR Franc'), ('XOF', 'West African CFA Franc'), ('YDD', 'Yemeni Dinar'), ('YER', 'Yemeni Rial'), ('YUN', 'Yugoslavian Convertible Dinar (1990–1992)'), ('YUD', 'Yugoslavian Hard Dinar (1966–1990)'), ('YUM', 'Yugoslavian New Dinar (1994–2002)'), ('YUR', 'Yugoslavian Reformed Dinar (1992–1993)'), ('ZWN', 'ZWN'), ('ZRN', 'Zairean New Zaire (1993–1998)'), ('ZRZ', 'Zairean Zaire (1971–1993)'), ('ZMW', 'Zambian Kwacha'), ('ZMK', 'Zambian Kwacha (1968–2012)'), ('ZWD', 'Zimbabwean Dollar (1980–2008)'), ('ZWR', 'Zimbabwean Dollar (2008)'), ('ZWL', 'Zimbabwean Dollar (2009)')], default='IQD', editable=False, max_length=3),
),
migrations.AddField(
model_name='order',
name='price_currency',
field=djmoney.models.fields.CurrencyField(choices=[('XUA', 'ADB Unit of Account'), ('AFN', 'Afghan Afghani'), ('AFA', 'Afghan Afghani (1927–2002)'), ('ALL', 'Albanian Lek'), ('ALK', 'Albanian Lek (1946–1965)'), ('DZD', 'Algerian Dinar'), ('ADP', 'Andorran Peseta'), ('AOA', 'Angolan Kwanza'), ('AOK', 'Angolan Kwanza (1977–1991)'), ('AON', 'Angolan New Kwanza (1990–2000)'), ('AOR', 'Angolan Readjusted Kwanza (1995–1999)'), ('ARA', 'Argentine Austral'), ('ARS', 'Argentine Peso'), ('ARM', 'Argentine Peso (1881–1970)'), ('ARP', 'Argentine Peso (1983–1985)'), ('ARL', 'Argentine Peso Ley (1970–1983)'), ('AMD', 'Armenian Dram'), ('AWG', 'Aruban Florin'), ('AUD', 'Australian Dollar'), ('ATS', 'Austrian Schilling'), ('AZN', 'Azerbaijani Manat'), ('AZM', 'Azerbaijani Manat (1993–2006)'), ('BSD', 'Bahamian Dollar'), ('BHD', 'Bahraini Dinar'), ('BDT', 'Bangladeshi Taka'), ('BBD', 'Barbadian Dollar'), ('BYN', 'Belarusian Ruble'), ('BYB', 'Belarusian Ruble (1994–1999)'), ('BYR', 'Belarusian Ruble (2000–2016)'), ('BEF', 'Belgian Franc'), ('BEC', 'Belgian Franc (convertible)'), ('BEL', 'Belgian Franc (financial)'), ('BZD', 'Belize Dollar'), ('BMD', 'Bermudan Dollar'), ('BTN', 'Bhutanese Ngultrum'), ('BOB', 'Bolivian Boliviano'), ('BOL', 'Bolivian Boliviano (1863–1963)'), ('BOV', 'Bolivian Mvdol'), ('BOP', 'Bolivian Peso'), ('BAM', 'Bosnia-Herzegovina Convertible Mark'), ('BAD', 'Bosnia-Herzegovina Dinar (1992–1994)'), ('BAN', 'Bosnia-Herzegovina New Dinar (1994–1997)'), ('BWP', 'Botswanan Pula'), ('BRC', 'Brazilian Cruzado (1986–1989)'), ('BRZ', 'Brazilian Cruzeiro (1942–1967)'), ('BRE', 'Brazilian Cruzeiro (1990–1993)'), ('BRR', 'Brazilian Cruzeiro (1993–1994)'), ('BRN', 'Brazilian New Cruzado (1989–1990)'), ('BRB', 'Brazilian New Cruzeiro (1967–1986)'), ('BRL', 'Brazilian Real'), ('GBP', 'British Pound'), ('BND', 'Brunei Dollar'), ('BGL', 'Bulgarian Hard Lev'), ('BGN', 'Bulgarian Lev'), ('BGO', 'Bulgarian Lev (1879–1952)'), ('BGM', 'Bulgarian Socialist Lev'), ('BUK', 'Burmese Kyat'), ('BIF', 'Burundian Franc'), ('XPF', 'CFP Franc'), ('KHR', 'Cambodian Riel'), ('CAD', 'Canadian Dollar'), ('CVE', 'Cape Verdean Escudo'), ('KYD', 'Cayman Islands Dollar'), ('XAF', 'Central African CFA Franc'), ('CLE', 'Chilean Escudo'), ('CLP', 'Chilean Peso'), ('CLF', 'Chilean Unit of Account (UF)'), ('CNX', 'Chinese People’s Bank Dollar'), ('CNY', 'Chinese Yuan'), ('CNH', 'Chinese Yuan (offshore)'), ('COP', 'Colombian Peso'), ('COU', 'Colombian Real Value Unit'), ('KMF', 'Comorian Franc'), ('CDF', 'Congolese Franc'), ('CRC', 'Costa Rican Colón'), ('HRD', 'Croatian Dinar'), ('HRK', 'Croatian Kuna'), ('CUC', 'Cuban Convertible Peso'), ('CUP', 'Cuban Peso'), ('CYP', 'Cypriot Pound'), ('CZK', 'Czech Koruna'), ('CSK', 'Czechoslovak Hard Koruna'), ('DKK', 'Danish Krone'), ('DJF', 'Djiboutian Franc'), ('DOP', 'Dominican Peso'), ('NLG', 'Dutch Guilder'), ('XCD', 'East Caribbean Dollar'), ('DDM', 'East German Mark'), ('ECS', 'Ecuadorian Sucre'), ('ECV', 'Ecuadorian Unit of Constant Value'), ('EGP', 'Egyptian Pound'), ('GQE', 'Equatorial Guinean Ekwele'), ('ERN', 'Eritrean Nakfa'), ('EEK', 'Estonian Kroon'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('XBA', 'European Composite Unit'), ('XEU', 'European Currency Unit'), ('XBB', 'European Monetary Unit'), ('XBC', 'European Unit of Account (XBC)'), ('XBD', 'European Unit of Account (XBD)'), ('FKP', 'Falkland Islands Pound'), ('FJD', 'Fijian Dollar'), ('FIM', 'Finnish Markka'), ('FRF', 'French Franc'), ('XFO', 'French Gold Franc'), ('XFU', 'French UIC-Franc'), ('GMD', 'Gambian 
Dalasi'), ('GEK', 'Georgian Kupon Larit'), ('GEL', 'Georgian Lari'), ('DEM', 'German Mark'), ('GHS', 'Ghanaian Cedi'), ('GHC', 'Ghanaian Cedi (1979–2007)'), ('GIP', 'Gibraltar Pound'), ('XAU', 'Gold'), ('GRD', 'Greek Drachma'), ('GTQ', 'Guatemalan Quetzal'), ('GWP', 'Guinea-Bissau Peso'), ('GNF', 'Guinean Franc'), ('GNS', 'Guinean Syli'), ('GYD', 'Guyanaese Dollar'), ('HTG', 'Haitian Gourde'), ('HNL', 'Honduran Lempira'), ('HKD', 'Hong Kong Dollar'), ('HUF', 'Hungarian Forint'), ('IMP', 'IMP'), ('ISK', 'Icelandic Króna'), ('ISJ', 'Icelandic Króna (1918–1981)'), ('INR', 'Indian Rupee'), ('IDR', 'Indonesian Rupiah'), ('IRR', 'Iranian Rial'), ('IQD', 'Iraqi Dinar'), ('IEP', 'Irish Pound'), ('ILS', 'Israeli New Shekel'), ('ILP', 'Israeli Pound'), ('ILR', 'Israeli Shekel (1980–1985)'), ('ITL', 'Italian Lira'), ('JMD', 'Jamaican Dollar'), ('JPY', 'Japanese Yen'), ('JOD', 'Jordanian Dinar'), ('KZT', 'Kazakhstani Tenge'), ('KES', 'Kenyan Shilling'), ('KWD', 'Kuwaiti Dinar'), ('KGS', 'Kyrgystani Som'), ('LAK', 'Laotian Kip'), ('LVL', 'Latvian Lats'), ('LVR', 'Latvian Ruble'), ('LBP', 'Lebanese Pound'), ('LSL', 'Lesotho Loti'), ('LRD', 'Liberian Dollar'), ('LYD', 'Libyan Dinar'), ('LTL', 'Lithuanian Litas'), ('LTT', 'Lithuanian Talonas'), ('LUL', 'Luxembourg Financial Franc'), ('LUC', 'Luxembourgian Convertible Franc'), ('LUF', 'Luxembourgian Franc'), ('MOP', 'Macanese Pataca'), ('MKD', 'Macedonian Denar'), ('MKN', 'Macedonian Denar (1992–1993)'), ('MGA', 'Malagasy Ariary'), ('MGF', 'Malagasy Franc'), ('MWK', 'Malawian Kwacha'), ('MYR', 'Malaysian Ringgit'), ('MVR', 'Maldivian Rufiyaa'), ('MVP', 'Maldivian Rupee (1947–1981)'), ('MLF', 'Malian Franc'), ('MTL', 'Maltese Lira'), ('MTP', 'Maltese Pound'), ('MRU', 'Mauritanian Ouguiya'), ('MRO', 'Mauritanian Ouguiya (1973–2017)'), ('MUR', 'Mauritian Rupee'), ('MXV', 'Mexican Investment Unit'), ('MXN', 'Mexican Peso'), ('MXP', 'Mexican Silver Peso (1861–1992)'), ('MDC', 'Moldovan Cupon'), ('MDL', 'Moldovan Leu'), ('MCF', 'Monegasque Franc'), ('MNT', 'Mongolian Tugrik'), ('MAD', 'Moroccan Dirham'), ('MAF', 'Moroccan Franc'), ('MZE', 'Mozambican Escudo'), ('MZN', 'Mozambican Metical'), ('MZM', 'Mozambican Metical (1980–2006)'), ('MMK', 'Myanmar Kyat'), ('NAD', 'Namibian Dollar'), ('NPR', 'Nepalese Rupee'), ('ANG', 'Netherlands Antillean Guilder'), ('TWD', 'New Taiwan Dollar'), ('NZD', 'New Zealand Dollar'), ('NIO', 'Nicaraguan Córdoba'), ('NIC', 'Nicaraguan Córdoba (1988–1991)'), ('NGN', 'Nigerian Naira'), ('KPW', 'North Korean Won'), ('NOK', 'Norwegian Krone'), ('OMR', 'Omani Rial'), ('PKR', 'Pakistani Rupee'), ('XPD', 'Palladium'), ('PAB', 'Panamanian Balboa'), ('PGK', 'Papua New Guinean Kina'), ('PYG', 'Paraguayan Guarani'), ('PEI', 'Peruvian Inti'), ('PEN', 'Peruvian Sol'), ('PES', 'Peruvian Sol (1863–1965)'), ('PHP', 'Philippine Piso'), ('XPT', 'Platinum'), ('PLN', 'Polish Zloty'), ('PLZ', 'Polish Zloty (1950–1995)'), ('PTE', 'Portuguese Escudo'), ('GWE', 'Portuguese Guinea Escudo'), ('QAR', 'Qatari Rial'), ('XRE', 'RINET Funds'), ('RHD', 'Rhodesian Dollar'), ('RON', 'Romanian Leu'), ('ROL', 'Romanian Leu (1952–2006)'), ('RUB', 'Russian Ruble'), ('RUR', 'Russian Ruble (1991–1998)'), ('RWF', 'Rwandan Franc'), ('SVC', 'Salvadoran Colón'), ('WST', 'Samoan Tala'), ('SAR', 'Saudi Riyal'), ('RSD', 'Serbian Dinar'), ('CSD', 'Serbian Dinar (2002–2006)'), ('SCR', 'Seychellois Rupee'), ('SLL', 'Sierra Leonean Leone'), ('XAG', 'Silver'), ('SGD', 'Singapore Dollar'), ('SKK', 'Slovak Koruna'), ('SIT', 'Slovenian Tolar'), ('SBD', 'Solomon Islands Dollar'), ('SOS', 
'Somali Shilling'), ('ZAR', 'South African Rand'), ('ZAL', 'South African Rand (financial)'), ('KRH', 'South Korean Hwan (1953–1962)'), ('KRW', 'South Korean Won'), ('KRO', 'South Korean Won (1945–1953)'), ('SSP', 'South Sudanese Pound'), ('SUR', 'Soviet Rouble'), ('ESP', 'Spanish Peseta'), ('ESA', 'Spanish Peseta (A account)'), ('ESB', 'Spanish Peseta (convertible account)'), ('XDR', 'Special Drawing Rights'), ('LKR', 'Sri Lankan Rupee'), ('SHP', 'St. Helena Pound'), ('XSU', 'Sucre'), ('SDD', 'Sudanese Dinar (1992–2007)'), ('SDG', 'Sudanese Pound'), ('SDP', 'Sudanese Pound (1957–1998)'), ('SRD', 'Surinamese Dollar'), ('SRG', 'Surinamese Guilder'), ('SZL', 'Swazi Lilangeni'), ('SEK', 'Swedish Krona'), ('CHF', 'Swiss Franc'), ('SYP', 'Syrian Pound'), ('STN', 'São Tomé & Príncipe Dobra'), ('STD', 'São Tomé & Príncipe Dobra (1977–2017)'), ('TVD', 'TVD'), ('TJR', 'Tajikistani Ruble'), ('TJS', 'Tajikistani Somoni'), ('TZS', 'Tanzanian Shilling'), ('XTS', 'Testing Currency Code'), ('THB', 'Thai Baht'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('TPE', 'Timorese Escudo'), ('TOP', 'Tongan Paʻanga'), ('TTD', 'Trinidad & Tobago Dollar'), ('TND', 'Tunisian Dinar'), ('TRY', 'Turkish Lira'), ('TRL', 'Turkish Lira (1922–2005)'), ('TMT', 'Turkmenistani Manat'), ('TMM', 'Turkmenistani Manat (1993–2009)'), ('USD', 'US Dollar'), ('USN', 'US Dollar (Next day)'), ('USS', 'US Dollar (Same day)'), ('UGX', 'Ugandan Shilling'), ('UGS', 'Ugandan Shilling (1966–1987)'), ('UAH', 'Ukrainian Hryvnia'), ('UAK', 'Ukrainian Karbovanets'), ('AED', 'United Arab Emirates Dirham'), ('UYW', 'Uruguayan Nominal Wage Index Unit'), ('UYU', 'Uruguayan Peso'), ('UYP', 'Uruguayan Peso (1975–1993)'), ('UYI', 'Uruguayan Peso (Indexed Units)'), ('UZS', 'Uzbekistani Som'), ('VUV', 'Vanuatu Vatu'), ('VES', 'Venezuelan Bolívar'), ('VEB', 'Venezuelan Bolívar (1871–2008)'), ('VEF', 'Venezuelan Bolívar (2008–2018)'), ('VND', 'Vietnamese Dong'), ('VNN', 'Vietnamese Dong (1978–1985)'), ('CHE', 'WIR Euro'), ('CHW', 'WIR Franc'), ('XOF', 'West African CFA Franc'), ('YDD', 'Yemeni Dinar'), ('YER', 'Yemeni Rial'), ('YUN', 'Yugoslavian Convertible Dinar (1990–1992)'), ('YUD', 'Yugoslavian Hard Dinar (1966–1990)'), ('YUM', 'Yugoslavian New Dinar (1994–2002)'), ('YUR', 'Yugoslavian Reformed Dinar (1992–1993)'), ('ZWN', 'ZWN'), ('ZRN', 'Zairean New Zaire (1993–1998)'), ('ZRZ', 'Zairean Zaire (1971–1993)'), ('ZMW', 'Zambian Kwacha'), ('ZMK', 'Zambian Kwacha (1968–2012)'), ('ZWD', 'Zimbabwean Dollar (1980–2008)'), ('ZWR', 'Zimbabwean Dollar (2008)'), ('ZWL', 'Zimbabwean Dollar (2009)')], default='IQD', editable=False, max_length=3),
),
migrations.AlterField(
model_name='customer',
name='gifts',
field=models.IntegerField(editable=False, null=True),
),
migrations.AlterField(
model_name='customer',
name='wallet',
field=djmoney.models.fields.MoneyField(decimal_places=0, default_currency='IQD', max_digits=14, null=True),
),
migrations.AlterField(
model_name='order',
name='price',
field=djmoney.models.fields.MoneyField(decimal_places=0, default_currency='IQD', max_digits=14),
),
]
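# Note (added for clarity): djmoney's MoneyField is stored as two database
# columns -- the numeric amount plus a companion <field>_currency column --
# which is why this migration pairs each MoneyField alteration ('wallet',
# 'price') with an added 'wallet_currency'/'price_currency' field defaulting
# to 'IQD'.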
| 514.95 | 9,762 | 0.613312 |
aab312bbb68dba1597d1ea79afa0ecdbd4110f03 | 6,902 | py | Python | wsme/tests/test_restxml.py | Kjir/wsme | 0135b7dac67668815bf34f15894f05beb0c94faa | [
"MIT"
] | null | null | null | wsme/tests/test_restxml.py | Kjir/wsme | 0135b7dac67668815bf34f15894f05beb0c94faa | [
"MIT"
] | null | null | null | wsme/tests/test_restxml.py | Kjir/wsme | 0135b7dac67668815bf34f15894f05beb0c94faa | [
"MIT"
] | null | null | null | import decimal
import datetime
import base64
from six import u, b
import six
import wsme.tests.protocol
from wsme.utils import parse_isodatetime, parse_isodate, parse_isotime
from wsme.types import isarray, isdict, isusertype, register_type
from wsme.rest.xml import fromxml, toxml
try:
import xml.etree.ElementTree as et
except ImportError:
import cElementTree as et # noqa
def dumpxml(key, obj, datatype=None):
el = et.Element(key)
if isinstance(obj, tuple):
obj, datatype = obj
if isinstance(datatype, list):
for item in obj:
el.append(dumpxml('item', item, datatype[0]))
elif isinstance(datatype, dict):
key_type, value_type = list(datatype.items())[0]
for item in obj.items():
node = et.SubElement(el, 'item')
node.append(dumpxml('key', item[0], key_type))
node.append(dumpxml('value', item[1], value_type))
elif datatype == wsme.types.binary:
el.text = base64.encodestring(obj).decode('ascii')
elif isinstance(obj, wsme.types.bytes):
el.text = obj.decode('ascii')
elif isinstance(obj, wsme.types.text):
el.text = obj
elif type(obj) in (int, float, bool, decimal.Decimal):
el.text = six.text_type(obj)
elif type(obj) in (datetime.date, datetime.time, datetime.datetime):
el.text = obj.isoformat()
elif isinstance(obj, type(None)):
el.set('nil', 'true')
elif hasattr(datatype, '_wsme_attributes'):
for attr in datatype._wsme_attributes:
name = attr.name
if name not in obj:
continue
o = obj[name]
el.append(dumpxml(name, o, attr.datatype))
elif type(obj) == dict:
for name, value in obj.items():
el.append(dumpxml(name, value))
print(obj, datatype, et.tostring(el))
return el
def loadxml(el, datatype):
print (el, datatype, len(el))
if el.get('nil') == 'true':
return None
if isinstance(datatype, list):
return [loadxml(item, datatype[0]) for item in el.findall('item')]
elif isarray(datatype):
return [
loadxml(item, datatype.item_type) for item in el.findall('item')
]
elif isinstance(datatype, dict):
key_type, value_type = list(datatype.items())[0]
return dict((
(loadxml(item.find('key'), key_type),
loadxml(item.find('value'), value_type))
for item in el.findall('item')
))
elif isdict(datatype):
return dict((
(loadxml(item.find('key'), datatype.key_type),
loadxml(item.find('value'), datatype.value_type))
for item in el.findall('item')
))
elif len(el):
d = {}
for attr in datatype._wsme_attributes:
name = attr.name
child = el.find(name)
print (name, attr, child)
if child is not None:
d[name] = loadxml(child, attr.datatype)
print (d)
return d
else:
if datatype == wsme.types.binary:
return base64.decodestring(el.text.encode('ascii'))
if isusertype(datatype):
datatype = datatype.basetype
if datatype == datetime.date:
return parse_isodate(el.text)
if datatype == datetime.time:
return parse_isotime(el.text)
if datatype == datetime.datetime:
return parse_isodatetime(el.text)
if datatype == wsme.types.text:
return datatype(el.text if el.text else u(''))
if datatype == bool:
return el.text.lower() != 'false'
if datatype is None:
return el.text
if datatype is wsme.types.bytes:
return el.text.encode('ascii')
return datatype(el.text)
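# Illustrative example (added for clarity, not part of the original module):
# for an element such as
#
#   <value><aint>5</aint><atext>test</atext></value>
#
# loadxml(el, MyType) -- with MyType as defined in the tests below -- walks
# MyType._wsme_attributes and returns {'aint': 5, 'atext': u'test'}, i.e. the
# inverse of what dumpxml produces above.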
class TestRestXML(wsme.tests.protocol.RestOnlyProtocolTestCase):
protocol = 'restxml'
def call(self, fpath, _rt=None, _accept=None, _no_result_decode=False,
body=None, **kw):
if body:
el = dumpxml('body', body)
else:
el = dumpxml('parameters', kw)
content = et.tostring(el)
headers = {
'Content-Type': 'text/xml',
}
if _accept is not None:
headers['Accept'] = _accept
res = self.app.post(
'/' + fpath,
content,
headers=headers,
expect_errors=True)
print ("Received:", res.body)
if _no_result_decode:
return res
el = et.fromstring(res.body)
if el.tag == 'error':
raise wsme.tests.protocol.CallException(
el.find('faultcode').text,
el.find('faultstring').text,
el.find('debuginfo') is not None and
el.find('debuginfo').text or None
)
else:
return loadxml(et.fromstring(res.body), _rt)
def test_encode_sample_value(self):
class MyType(object):
aint = int
atext = wsme.types.text
register_type(MyType)
value = MyType()
value.aint = 5
value.atext = u('test')
language, sample = wsme.rest.xml.encode_sample_value(
MyType, value, True)
print (language, sample)
assert language == 'xml'
assert sample == b("""<value>
<aint>5</aint>
<atext>test</atext>
</value>""")
def test_encode_sample_params(self):
lang, content = wsme.rest.xml.encode_sample_params(
[('a', int, 2)], True)
assert lang == 'xml', lang
assert content == b('<parameters>\n <a>2</a>\n</parameters>'), content
def test_encode_sample_result(self):
lang, content = wsme.rest.xml.encode_sample_result(int, 2, True)
assert lang == 'xml', lang
assert content == b('<result>2</result>'), content
def test_nil_fromxml(self):
for dt in (
str, [int], {int: str}, bool,
datetime.date, datetime.time, datetime.datetime):
e = et.Element('value', nil='true')
assert fromxml(dt, e) is None
def test_nil_toxml(self):
for dt in (
wsme.types.bytes,
[int], {int: str}, bool,
datetime.date, datetime.time, datetime.datetime):
x = et.tostring(toxml(dt, 'value', None))
assert x == b('<value nil="true" />'), x
def test_unset_attrs(self):
class AType(object):
someattr = wsme.types.bytes
wsme.types.register_type(AType)
x = et.tostring(toxml(AType, 'value', AType()))
assert x == b('<value />'), x
| 32.556604 | 79 | 0.565923 |
4ca1af9c0083b4402e666ef57d901e82af7e7de1 | 2,544 | py | Python | main_app/migrations/0001_initial.py | SUMEKAGARWAL/Rescue | 98972f782846a2e82804dd35d371fb9799b8471d | ["MIT"] | null | null | null | main_app/migrations/0001_initial.py | SUMEKAGARWAL/Rescue | 98972f782846a2e82804dd35d371fb9799b8471d | ["MIT"] | null | null | null | main_app/migrations/0001_initial.py | SUMEKAGARWAL/Rescue | 98972f782846a2e82804dd35d371fb9799b8471d | ["MIT"] | null | null | null |
# Generated by Django 3.0.8 on 2020-11-07 06:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="contact",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100)),
("email", models.EmailField(max_length=254)),
(
"relation",
models.CharField(
choices=[
("Father", "Father"),
("Mother", "Mother"),
("Brother", "Brother"),
("Sister", "Sister"),
("Husband", "Husband"),
("Friend", "Friend"),
("Relative", "Relative"),
("Other", "Other"),
],
default="Other",
max_length=10,
),
),
(
"user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="contact",
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
| 39.138462 | 271 | 0.434748 |
151b6df07659af4118256ba996e4e21df3b97605 | 1,349 | py | Python | 100-199/180-189/189.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | ["MIT"] | null | null | null | 100-199/180-189/189.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | ["MIT"] | null | null | null | 100-199/180-189/189.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | ["MIT"] | null | null | null |
"""
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: nums = [1, 2, 3, 4, 5, 6, 7], k = 3, Output: [5, 6, 7, 1, 2, 3, 4]
Explanation:
rotate 1 steps to the right: [7, 1, 2, 3, 4, 5, 6]
rotate 2 steps to the right: [6, 7, 1, 2, 3, 4, 5]
rotate 3 steps to the right: [5, 6, 7, 1, 2, 3, 4]
Example 2:
Input: nums = [-1, -100, 3, 99], k = 2, Output: [3, 99, -1, -100]
Explanation:
rotate 1 steps to the right: [99, -1, -100, 3]
rotate 2 steps to the right: [3, 99, -1, -100]
"""
"""
Our first instinct might be to repeatedly pop items from the end of the list and insert them at the front, but this is
horrendously inefficient as inserts to the front are O(n) due to having to move the rest of the list in memory. Much
better is identifying the front part of the list, extending the list with a copy of that part, then deleting the entire
front part in one operation.
"""
# def rotate(nums, k):
# k %= len(nums)
# for _ in range(k):
# nums.insert(0, nums.pop())
def rotate(nums, k):
k = (len(nums) - k) % len(nums)
nums.extend(nums[:k])
del nums[:k]
nums = [1, 2, 3, 4, 5, 6, 7]
rotate(nums, 3)
assert nums == [5, 6, 7, 1, 2, 3, 4]
nums = [-1, -100, 3, 99]
rotate(nums, 2)
assert nums == [3, 99, -1, -100]
nums = [1, 2]
rotate(nums, 3)
assert nums == [2, 1]
| 27.530612 | 119 | 0.611564 |
b5061e6a58c17ca56e2671e8968df93b49ebdc93 | 16,791 | py | Python | site/flask/lib/python2.7/site-packages/whoosh/lang/isri.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | ["MIT"] | 319 | 2016-09-22T15:54:48.000Z | 2022-03-18T02:36:58.000Z | site/flask/lib/python2.7/site-packages/whoosh/lang/isri.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | ["MIT"] | 27 | 2017-04-01T15:06:36.000Z | 2021-02-08T20:19:58.000Z | site/flask/lib/python2.7/site-packages/whoosh/lang/isri.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | ["MIT"] | 27 | 2016-10-06T16:05:32.000Z | 2022-03-18T02:37:00.000Z |
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: The ISRI Arabic Stemmer
#
# Copyright (C) 2001-2012 NLTK Project
# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005)
# Author: Hosam Algasaier <hosam_hme@yahoo.com>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
ISRI Arabic Stemmer
The algorithm for this stemmer is described in:
Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root
dictionary. Information Science Research Institute. University of Nevada, Las
Vegas, USA.
The Information Science Research Institute’s (ISRI) Arabic stemmer shares many
features with the Khoja stemmer. However, the main difference is that ISRI
stemmer does not use root dictionary. Also, if a root is not found, ISRI
stemmer returned normalized form, rather than returning the original
unmodified word.
Additional adjustments were made to improve the algorithm:
1- Adding 60 stop words.
2- Adding the pattern (تفاعيل) to ISRI pattern set.
3- The step 2 in the original algorithm was normalizing all hamza. This step is
discarded because it increases the word ambiguities and changes the original
root.
"""
from __future__ import unicode_literals
import re
class ISRIStemmer(object):
'''
ISRI Arabic stemmer based on algorithm: Arabic Stemming without a root dictionary.
Information Science Research Institute. University of Nevada, Las Vegas, USA.
A few minor modifications have been made to ISRI basic algorithm.
See the source code of this module for more information.
isri.stem(token) returns Arabic root for the given token.
The ISRI Stemmer requires that all tokens have Unicode string types.
If you use Python IDLE on Arabic Windows you have to decode text first
using Arabic '1256' coding.
'''
def __init__(self):
self.stm = 'defult none'
self.p3 = ['\u0643\u0627\u0644', '\u0628\u0627\u0644',
'\u0648\u0644\u0644', '\u0648\u0627\u0644'] # length three prefixes
self.p2 = ['\u0627\u0644', '\u0644\u0644'] # length two prefixes
self.p1 = ['\u0644', '\u0628', '\u0641', '\u0633', '\u0648',
'\u064a', '\u062a', '\u0646', '\u0627'] # length one prefixes
self.s3 = ['\u062a\u0645\u0644', '\u0647\u0645\u0644',
'\u062a\u0627\u0646', '\u062a\u064a\u0646',
'\u0643\u0645\u0644'] # length three suffixes
self.s2 = ['\u0648\u0646', '\u0627\u062a', '\u0627\u0646',
'\u064a\u0646', '\u062a\u0646', '\u0643\u0645',
'\u0647\u0646', '\u0646\u0627', '\u064a\u0627',
'\u0647\u0627', '\u062a\u0645', '\u0643\u0646',
'\u0646\u064a', '\u0648\u0627', '\u0645\u0627',
'\u0647\u0645'] # length two suffixes
self.s1 = ['\u0629', '\u0647', '\u064a', '\u0643', '\u062a',
'\u0627', '\u0646'] # length one suffixes
self.pr4 = {0: ['\u0645'], 1:['\u0627'],
2: ['\u0627', '\u0648', '\u064A'], 3:['\u0629']} # groups of length four patterns
self.pr53 = {0: ['\u0627', '\u062a'],
1: ['\u0627', '\u064a', '\u0648'],
2: ['\u0627', '\u062a', '\u0645'],
3: ['\u0645', '\u064a', '\u062a'],
4: ['\u0645', '\u062a'],
5: ['\u0627', '\u0648'],
6: ['\u0627', '\u0645']} # Groups of length five patterns and length three roots
self.re_short_vowels = re.compile(r'[\u064B-\u0652]')
self.re_hamza = re.compile(r'[\u0621\u0624\u0626]')
self.re_intial_hamza = re.compile(r'^[\u0622\u0623\u0625]')
self.stop_words = ['\u064a\u0643\u0648\u0646',
'\u0648\u0644\u064a\u0633',
'\u0648\u0643\u0627\u0646',
'\u0643\u0630\u0644\u0643',
'\u0627\u0644\u062a\u064a',
'\u0648\u0628\u064a\u0646',
'\u0639\u0644\u064a\u0647\u0627',
'\u0645\u0633\u0627\u0621',
'\u0627\u0644\u0630\u064a',
'\u0648\u0643\u0627\u0646\u062a',
'\u0648\u0644\u0643\u0646',
'\u0648\u0627\u0644\u062a\u064a',
'\u062a\u0643\u0648\u0646',
'\u0627\u0644\u064a\u0648\u0645',
'\u0627\u0644\u0644\u0630\u064a\u0646',
'\u0639\u0644\u064a\u0647',
'\u0643\u0627\u0646\u062a',
'\u0644\u0630\u0644\u0643',
'\u0623\u0645\u0627\u0645',
'\u0647\u0646\u0627\u0643',
'\u0645\u0646\u0647\u0627',
'\u0645\u0627\u0632\u0627\u0644',
'\u0644\u0627\u0632\u0627\u0644',
'\u0644\u0627\u064a\u0632\u0627\u0644',
'\u0645\u0627\u064a\u0632\u0627\u0644',
'\u0627\u0635\u0628\u062d',
'\u0623\u0635\u0628\u062d',
'\u0623\u0645\u0633\u0649',
'\u0627\u0645\u0633\u0649',
'\u0623\u0636\u062d\u0649',
'\u0627\u0636\u062d\u0649',
'\u0645\u0627\u0628\u0631\u062d',
'\u0645\u0627\u0641\u062a\u0626',
'\u0645\u0627\u0627\u0646\u0641\u0643',
'\u0644\u0627\u0633\u064a\u0645\u0627',
'\u0648\u0644\u0627\u064a\u0632\u0627\u0644',
'\u0627\u0644\u062d\u0627\u0644\u064a',
'\u0627\u0644\u064a\u0647\u0627',
'\u0627\u0644\u0630\u064a\u0646',
'\u0641\u0627\u0646\u0647',
'\u0648\u0627\u0644\u0630\u064a',
'\u0648\u0647\u0630\u0627',
'\u0644\u0647\u0630\u0627',
'\u0641\u0643\u0627\u0646',
'\u0633\u062a\u0643\u0648\u0646',
'\u0627\u0644\u064a\u0647',
'\u064a\u0645\u0643\u0646',
'\u0628\u0647\u0630\u0627',
'\u0627\u0644\u0630\u0649']
def stem(self, token):
"""
Stemming a word token using the ISRI stemmer.
"""
self.stm = token
self.norm(1) # remove diacritics which representing Arabic short vowels
if self.stm in self.stop_words: return self.stm # exclude stop words from being processed
self.pre32() # remove length three and length two prefixes in this order
self.suf32() # remove length three and length two suffixes in this order
self.waw() # remove connective ‘و’ if it precedes a word beginning with ‘و’
self.norm(2) # normalize initial hamza to bare alif
if len(self.stm) <= 3: return self.stm # return stem if less than or equal to three
if len(self.stm) == 4: # length 4 word
self.pro_w4()
return self.stm
elif len(self.stm) == 5: # length 5 word
self.pro_w53()
self.end_w5()
return self.stm
elif len(self.stm) == 6: # length 6 word
self.pro_w6()
self.end_w6()
return self.stm
elif len(self.stm) == 7: # length 7 word
self.suf1()
if len(self.stm) == 7:
self.pre1()
if len(self.stm) == 6:
self.pro_w6()
self.end_w6()
return self.stm
return self.stm # if word length >7 , then no stemming
def norm(self, num):
"""
normalization:
num=1 normalize diacritics
num=2 normalize initial hamza
num=3 both 1&2
"""
self.k = num
if self.k == 1:
self.stm = self.re_short_vowels.sub('', self.stm)
return self.stm
elif self.k == 2:
self.stm = self.re_intial_hamza.sub(r'\u0627', self.stm)
return self.stm
elif self.k == 3:
self.stm = self.re_short_vowels.sub('', self.stm)
self.stm = self.re_intial_hamza.sub(r'\u0627', self.stm)
return self.stm
def pre32(self):
"""remove length three and length two prefixes in this order"""
if len(self.stm) >= 6:
for pre3 in self.p3:
if self.stm.startswith(pre3):
self.stm = self.stm[3:]
return self.stm
elif len(self.stm) >= 5:
for pre2 in self.p2:
if self.stm.startswith(pre2):
self.stm = self.stm[2:]
return self.stm
def suf32(self):
"""remove length three and length two suffixes in this order"""
if len(self.stm) >= 6:
for suf3 in self.s3:
if self.stm.endswith(suf3):
self.stm = self.stm[:-3]
return self.stm
elif len(self.stm) >= 5:
for suf2 in self.s2:
if self.stm.endswith(suf2):
self.stm = self.stm[:-2]
return self.stm
def waw(self):
"""remove connective ‘و’ if it precedes a word beginning with ‘و’ """
if (len(self.stm) >= 4) & (self.stm[:2] == '\u0648\u0648'):
self.stm = self.stm[1:]
return self.stm
def pro_w4(self):
"""process length four patterns and extract length three roots"""
if self.stm[0] in self.pr4[0]: # مفعل
self.stm = self.stm[1:]
return self.stm
elif self.stm[1] in self.pr4[1]: # فاعل
self.stm = self.stm[0] + self.stm[2:]
return self.stm
elif self.stm[2] in self.pr4[2]: # فعال - فعول - فعيل
self.stm = self.stm[:2] + self.stm[3]
return self.stm
elif self.stm[3] in self.pr4[3]: # فعلة
self.stm = self.stm[:-1]
return self.stm
else:
            self.suf1()  # do - normalize short suffix
if len(self.stm) == 4:
self.pre1() # do - normalize short prefix
return self.stm
def pro_w53(self):
"""process length five patterns and extract length three roots"""
if ((self.stm[2] in self.pr53[0]) & (self.stm[0] == '\u0627')): # افتعل - افاعل
self.stm = self.stm[1] + self.stm[3:]
return self.stm
elif ((self.stm[3] in self.pr53[1]) & (self.stm[0] == '\u0645')): # مفعول - مفعال - مفعيل
self.stm = self.stm[1:3] + self.stm[4]
return self.stm
elif ((self.stm[0] in self.pr53[2]) & (self.stm[4] == '\u0629')): # مفعلة - تفعلة - افعلة
self.stm = self.stm[1:4]
return self.stm
elif ((self.stm[0] in self.pr53[3]) & (self.stm[2] == '\u062a')): # مفتعل - يفتعل - تفتعل
self.stm = self.stm[1] + self.stm[3:]
return self.stm
elif ((self.stm[0] in self.pr53[4]) & (self.stm[2] == '\u0627')): #مفاعل - تفاعل
self.stm = self.stm[1] + self.stm[3:]
return self.stm
elif ((self.stm[2] in self.pr53[5]) & (self.stm[4] == '\u0629')): # فعولة - فعالة
self.stm = self.stm[:2] + self.stm[3]
return self.stm
elif ((self.stm[0] in self.pr53[6]) & (self.stm[1] == '\u0646')): # انفعل - منفعل
self.stm = self.stm[2:]
return self.stm
elif ((self.stm[3] == '\u0627') & (self.stm[0] == '\u0627')): # افعال
self.stm = self.stm[1:3] + self.stm[4]
return self.stm
elif ((self.stm[4] == '\u0646') & (self.stm[3] == '\u0627')): # فعلان
self.stm = self.stm[:3]
return self.stm
elif ((self.stm[3] == '\u064a') & (self.stm[0] == '\u062a')): # تفعيل
self.stm = self.stm[1:3] + self.stm[4]
return self.stm
elif ((self.stm[3] == '\u0648') & (self.stm[1] == '\u0627')): # فاعول
self.stm = self.stm[0] + self.stm[2] + self.stm[4]
return self.stm
elif ((self.stm[2] == '\u0627') & (self.stm[1] == '\u0648')): # فواعل
self.stm = self.stm[0] + self.stm[3:]
return self.stm
elif ((self.stm[3] == '\u0626') & (self.stm[2] == '\u0627')): # فعائل
self.stm = self.stm[:2] + self.stm[4]
return self.stm
elif ((self.stm[4] == '\u0629') & (self.stm[1] == '\u0627')): # فاعلة
self.stm = self.stm[0] + self.stm[2:4]
return self.stm
elif ((self.stm[4] == '\u064a') & (self.stm[2] == '\u0627')): # فعالي
self.stm = self.stm[:2] + self.stm[3]
return self.stm
else:
            self.suf1()  # do - normalize short suffix
if len(self.stm) == 5:
self.pre1() # do - normalize short prefix
return self.stm
def pro_w54(self):
"""process length five patterns and extract length four roots"""
if (self.stm[0] in self.pr53[2]): #تفعلل - افعلل - مفعلل
self.stm = self.stm[1:]
return self.stm
elif (self.stm[4] == '\u0629'): # فعللة
self.stm = self.stm[:4]
return self.stm
elif (self.stm[2] == '\u0627'): # فعالل
self.stm = self.stm[:2] + self.stm[3:]
return self.stm
def end_w5(self):
"""ending step (word of length five)"""
if len(self.stm) == 3:
return self.stm
elif len(self.stm) == 4:
self.pro_w4()
return self.stm
elif len(self.stm) == 5:
self.pro_w54()
return self.stm
def pro_w6(self):
"""process length six patterns and extract length three roots"""
if ((self.stm.startswith('\u0627\u0633\u062a')) or (self.stm.startswith('\u0645\u0633\u062a'))): # مستفعل - استفعل
self.stm = self.stm[3:]
return self.stm
elif (self.stm[0] == '\u0645' and self.stm[3] == '\u0627' and self.stm[5] == '\u0629'): # مفعالة
self.stm = self.stm[1:3] + self.stm[4]
return self.stm
elif (self.stm[0] == '\u0627' and self.stm[2] == '\u062a' and self.stm[4] == '\u0627'): # افتعال
self.stm = self.stm[1] + self.stm[3] + self.stm[5]
return self.stm
elif (self.stm[0] == '\u0627' and self.stm[3] == '\u0648' and self.stm[2] == self.stm[4]): # افعوعل
self.stm = self.stm[1] + self.stm[4:]
return self.stm
elif (self.stm[0] == '\u062a' and self.stm[2] == '\u0627' and self.stm[4] == '\u064a'): # تفاعيل new pattern
self.stm = self.stm[1] + self.stm[3] + self.stm[5]
return self.stm
else:
            self.suf1()  # do - normalize short suffix
if len(self.stm) == 6:
self.pre1() # do - normalize short prefix
return self.stm
def pro_w64(self):
"""process length six patterns and extract length four roots"""
if (self.stm[0] and self.stm[4]) == '\u0627': # افعلال
self.stm = self.stm[1:4] + self.stm[5]
return self.stm
elif (self.stm.startswith('\u0645\u062a')): # متفعلل
self.stm = self.stm[2:]
return self.stm
def end_w6(self):
"""ending step (word of length six)"""
if len(self.stm) == 3:
return self.stm
elif len(self.stm) == 5:
self.pro_w53()
self.end_w5()
return self.stm
elif len (self.stm) == 6:
self.pro_w64()
return self.stm
def suf1(self):
"""normalize short sufix"""
for sf1 in self.s1:
if self.stm.endswith(sf1):
self.stm = self.stm[:-1]
return self.stm
def pre1(self):
"""normalize short prefix"""
for sp1 in self.p1:
if self.stm.startswith(sp1):
self.stm = self.stm[1:]
return self.stm
| 43.840731 | 131 | 0.494968 |
30decdb588b901c970be0003cf8e42d9c9212676 | 230 | py | Python | sensu_plugin/compat.py | sufiyanghori/sensu-plugin-python | 6682163a2a2219e8132b4c9e1dd53663fa477ae5 | ["MIT"] | 35 | 2015-01-11T13:34:32.000Z | 2017-04-28T11:20:02.000Z | sensu_plugin/compat.py | sufiyanghori/sensu-plugin-python | 6682163a2a2219e8132b4c9e1dd53663fa477ae5 | ["MIT"] | 42 | 2017-10-02T12:05:15.000Z | 2021-03-22T21:20:54.000Z | sensu_plugin/compat.py | sufiyanghori/sensu-plugin-python | 6682163a2a2219e8132b4c9e1dd53663fa477ae5 | ["MIT"] | 14 | 2017-10-02T08:51:44.000Z | 2022-02-12T16:36:55.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=undefined-variable
"""
Python 2/3 compatibility code.
"""
try:
compat_basestring = basestring
except NameError: # Python 3
compat_basestring = (bytes, str)
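# Typical use (illustrative): isinstance(value, compat_basestring) accepts str on both
# Python versions, plus unicode values on Python 2 and bytes values on Python 3.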
| 19.166667 | 36 | 0.682609 |
041a341237c0f511dd15454bf4c1ef6c4e98d05c | 2,400 | py | Python | one_crm/users/tests/test_views.py | lianzhanshu/one_crm | 7320c38416dd05fa95400ef4f5d50b0a35125c33 | ["MIT"] | 4 | 2020-08-09T08:47:35.000Z | 2021-12-16T01:27:56.000Z | one_crm/users/tests/test_views.py | flingjie/one_crm | 7320c38416dd05fa95400ef4f5d50b0a35125c33 | ["MIT"] | null | null | null | one_crm/users/tests/test_views.py | flingjie/one_crm | 7320c38416dd05fa95400ef4f5d50b0a35125c33 | ["MIT"] | 4 | 2020-08-09T08:43:01.000Z | 2021-09-29T11:45:33.000Z |
import pytest
from django.contrib.auth.models import AnonymousUser
from django.http.response import Http404
from django.test import RequestFactory
from one_crm.users.models import User
from one_crm.users.tests.factories import UserFactory
from one_crm.users.views import ( # isort:skip
UserRedirectView,
UserUpdateView,
user_detail_view,
)
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, rf: RequestFactory):
view = UserRedirectView()
request = rf.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
class TestUserDetailView:
def test_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = UserFactory()
response = user_detail_view(request, username=user.username)
assert response.status_code == 200
def test_not_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = AnonymousUser() # type: ignore
response = user_detail_view(request, username=user.username)
assert response.status_code == 302
assert response.url == "/accounts/login/?next=/fake-url/"
def test_case_sensitivity(self, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = UserFactory(username="UserName")
with pytest.raises(Http404):
user_detail_view(request, username="username")
| 29.62963 | 74 | 0.6725 |
0d1c9e41c2abba66cecc17ba27fb2a124ac892cf | 3,923 | py | Python | app.py | striker43/storjWidget-exporter | e4bb98580b3d547ed4e0f7c6523e20ee584c7b41 | ["Apache-2.0"] | 4 | 2020-11-25T15:14:56.000Z | 2021-09-20T07:20:41.000Z | app.py | striker43/storjWidget-exporter | e4bb98580b3d547ed4e0f7c6523e20ee584c7b41 | ["Apache-2.0"] | null | null | null | app.py | striker43/storjWidget-exporter | e4bb98580b3d547ed4e0f7c6523e20ee584c7b41 | ["Apache-2.0"] | 1 | 2020-11-25T03:43:21.000Z | 2020-11-25T03:43:21.000Z |
from flask import Flask
import requests
import os
import time
import datetime
import json
from datetime import date
from requests.adapters import HTTPAdapter
app = Flask(__name__)
persistencePath = '/var/www/storjWidgetVolume/payoutData.txt'
nodes = os.environ.get('NODES_LIST', '').split(',')
payoutData = {}
payoutData['day'] = None
try:
with open(persistencePath) as json_file:
payoutData = json.load(json_file)
except OSError:
print("ERROR: Could not read " + persistencePath)
def getStringWithUnit(value):
if(value < 1000):
return str("{:.2f}".format(value)) + ' MB'
else:
return str("{:.2f}".format(value/1000)) + ' GB'
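# e.g. getStringWithUnit(512.0) -> '512.00 MB', getStringWithUnit(2048.0) -> '2.05 GB'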
def addUnits(data):
data['ingress'] = getStringWithUnit(data['ingress'])
data['egress'] = getStringWithUnit(data['egress'])
return data
def getRelevantDay(satellitesResponse):
numberOfDays = len(satellitesResponse['bandwidthDaily'])
relevantDay = None
for i in range(0, numberOfDays):
if(satellitesResponse['bandwidthDaily'][i]['intervalStart'].split('T')[0] == str(date.today())):
relevantDay = i
return relevantDay
def getBandwidthData(satellitesResponse, data):
relevantDay = getRelevantDay(satellitesResponse)
ingress = (satellitesResponse['bandwidthDaily'][relevantDay]['ingress']['usage'] + satellitesResponse['bandwidthDaily'][relevantDay]['ingress']['repair'])/1000000
egress = (satellitesResponse['bandwidthDaily'][relevantDay]['egress']['usage'] + satellitesResponse['bandwidthDaily'][relevantDay]['egress']['repair'] + satellitesResponse['bandwidthDaily'][relevantDay]['egress']['audit'])/1000000
data['ingress'] += ingress
data['egress'] += egress
return data
def getPayoutEstimationMonth(payoutResponse, data):
data['estimatedPayoutTotal'] += payoutResponse['currentMonth']['egressBandwidthPayout'] + payoutResponse['currentMonth']['egressRepairAuditPayout'] + payoutResponse['currentMonth']['diskSpacePayout']
return data
def getPayoutEstimationToday(data):
actualDay = str(date.today())
if(payoutData['day'] != actualDay):
payoutData[actualDay] = data['estimatedPayoutTotal']
payoutData['day'] = actualDay
with open(persistencePath, 'w') as outfile:
json.dump(payoutData, outfile)
print(payoutData)
print(data)
data['estimatedPayoutToday'] = (data['estimatedPayoutTotal'] - payoutData[actualDay])
return data
def getSpaceInfo(snoResponse, data):
data['spaceUsed'] += snoResponse['diskSpace']['used']/1000000000000
data['spaceAvailable'] += snoResponse['diskSpace']['available']/1000000000000
return data
def httpRequest(ipWithPort, path):
try:
response = requests.get('http://' + ipWithPort + '/api/' + path, timeout=5)
return response.json()
except requests.exceptions.Timeout:
return None
except requests.exceptions.ConnectionError:
return None
@app.route('/bandwidth')
def get_data():
data = {}
data['ingress'] = 0
data['egress'] = 0
data['estimatedPayoutTotal'] = 0.0
data['estimatedPayoutToday'] = 0.0
data['spaceUsed'] = 0.0
data['spaceAvailable'] = 0.0
data['totalNodesCount'] = len(nodes)
data['nodesOnline'] = len(nodes)
for ip in nodes:
snoResponse = httpRequest(ip, 'sno')
if(snoResponse != None):
satellitesResponse = httpRequest(ip, 'sno/satellites')
payoutResponse = httpRequest(ip, 'sno/estimated-payout')
getBandwidthData(satellitesResponse, data)
getPayoutEstimationMonth(payoutResponse, data)
getSpaceInfo(snoResponse, data)
else:
data['nodesOnline'] -= 1
getPayoutEstimationToday(data)
data['estimatedPayoutTotal'] = float("{:.2f}".format(data['estimatedPayoutTotal']/100))
data['estimatedPayoutToday'] = float("{:.2f}".format(data['estimatedPayoutToday']/100))
data['spaceUsed'] = float("{:.2f}".format(data['spaceUsed']))
data['spaceAvailable'] = float("{:.2f}".format(data['spaceAvailable']))
    return json.dumps(addUnits(data))
 | 33.818966 | 232 | 0.714249 |
d4aafb7f5cb6a5981703c8eabc8dde0dceba0586 | 414 | py | Python | cwl_airflow/wes/server.py | silviu001/cwl-airflow | df45fc173ada83d94df94bc861777d9f6687b99a | ["Apache-2.0"] | null | null | null | cwl_airflow/wes/server.py | silviu001/cwl-airflow | df45fc173ada83d94df94bc861777d9f6687b99a | ["Apache-2.0"] | null | null | null | cwl_airflow/wes/server.py | silviu001/cwl-airflow | df45fc173ada83d94df94bc861777d9f6687b99a | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import connexion
from connexion.resolver import Resolver
from cwl_airflow.wes.backend import CWLAirflowBackend
def run_wes_server(args):
app = connexion.App(__name__)
backend = CWLAirflowBackend()
def rs(x):
return getattr(backend, x.split('.')[-1])
app.add_api('openapi/swagger_configuration.yaml', resolver=Resolver(rs))
    app.run(port=args.port, host=args.host)
 | 31.846154 | 76 | 0.7343 |
cb153eb4c99e876cf184d972e1033f9a7f098956 | 745 | py | Python | Encryption/Encryption.py | saurav0001kumar/HackerRank | 38daaaf1e3f7b230ca70005480fa2f3e2c7a12be | ["MIT"] | 1 | 2020-07-03T02:07:30.000Z | 2020-07-03T02:07:30.000Z | Encryption/Encryption.py | saurav0001kumar/HackerRank | 38daaaf1e3f7b230ca70005480fa2f3e2c7a12be | ["MIT"] | null | null | null | Encryption/Encryption.py | saurav0001kumar/HackerRank | 38daaaf1e3f7b230ca70005480fa2f3e2c7a12be | ["MIT"] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the encryption function below.
def encryption(s):
res=[]
l=list(s)
l=list(filter(lambda x: x!=" " ,l))
s=''.join(l)
le=len(s)
row=math.sqrt(le)
row=int(row)
if row**2 == le:
row=int(row)
col=row
else:
col=row+1
row=int(row)
for i in range(col):
t=""
for j in range(i,le,col):
t=t+s[j]
res.append(t)
res1=" ".join(res)
return(res1)
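# Example (HackerRank sample input): encryption("haveaniceday") -> "hae and via ecy"
# (12 characters -> ceil(sqrt(12)) = 4 columns: "have" / "anic" / "eday", read column by column)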
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = encryption(s)
fptr.write(result + '\n')
fptr.close()
| 18.170732 | 48 | 0.500671 |
fe6e235a19e18ce11287af6f2b7e4213dd073f8e | 4,303 | py | Python | schemas/parquet_output_cluster_info_test.py | epapbak/insights-data-schemas | 00eb5eba786a21fa82693633d3c9d1eee32130d8 | ["Apache-2.0"] | null | null | null | schemas/parquet_output_cluster_info_test.py | epapbak/insights-data-schemas | 00eb5eba786a21fa82693633d3c9d1eee32130d8 | ["Apache-2.0"] | null | null | null | schemas/parquet_output_cluster_info_test.py | epapbak/insights-data-schemas | 00eb5eba786a21fa82693633d3c9d1eee32130d8 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8
# Copyright © 2021 Pavel Tisnovsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for parquet_output_cluster_info module."""
import pytest
import sys
from datetime import datetime
from voluptuous import Invalid
from parquet_output_cluster_info import schema, main
from common import validate
@pytest.fixture
def validation_schema():
"""Provide standard schema to check messages."""
return schema
# verbosity level
verbose = (True, False)
# attributes
attribute = (
"cluster_id",
"cluster_version",
"platform",
"collected_at",
"desired_version",
"archive_path",
"initial_version"
)
@pytest.fixture
def correct_message():
"""Provide correct message to be tested."""
return {
"cluster_id": b"123e4567-e89b-12d3-a456-426614174000",
"cluster_version": b"1.2.3",
"platform": b"AWS",
"collected_at": datetime.now(),
"desired_version": b"2.3.4",
"archive_path":
b"archives/compressed/00/00000000-0000-0000-0000-000000000000/202102/08/002219.tar.gz",
"initial_version": b"1.2.3"
}
def test_main_help():
"""Test the main function when -h parameter is given."""
sys.argv.append("-h")
with pytest.raises(SystemExit) as excinfo:
main()
assert excinfo.value.code == 0
def test_main_input():
"""Test the main function when -i parameter is given."""
sys.argv.append("-i test")
with pytest.raises(SystemExit) as excinfo:
main()
assert excinfo.value.code == 0
@pytest.mark.parametrize("verbose", verbose)
def test_validate_no_payload(validation_schema, verbose):
"""Test the validation for improper (no) payload."""
# it should fail
with pytest.raises(ValueError) as excinfo:
validate(schema, None, verbose)
@pytest.mark.parametrize("verbose", verbose)
def test_validate_correct_message(validation_schema, verbose, correct_message):
"""Test the validation for correct payload."""
# it should not fail
validate(schema, correct_message, verbose)
@pytest.mark.parametrize("verbose", verbose)
def test_validate_message_without_cluster_id_attribute(validation_schema, verbose, correct_message):
"""Test the validation for improper payload."""
del correct_message["cluster_id"]
# it should fail
with pytest.raises(Invalid) as excinfo:
validate(schema, correct_message, verbose)
@pytest.mark.parametrize("attribute", attribute)
@pytest.mark.parametrize("verbose", verbose)
def test_validate_message_without_attributes(validation_schema, verbose, correct_message,
attribute):
"""Test the validation for improper payload."""
del correct_message[attribute]
# it should fail
with pytest.raises(Invalid) as excinfo:
validate(schema, correct_message, verbose)
@pytest.mark.parametrize("attribute", attribute)
@pytest.mark.parametrize("verbose", verbose)
def test_validate_message_wrong_attributes(validation_schema, verbose, correct_message, attribute):
"""Test the validation for improper payload."""
# check with string not representing number
correct_message[attribute] = b"foobar"
# it should fail
with pytest.raises(Invalid) as excinfo:
validate(schema, correct_message, verbose)
# check with number
correct_message[attribute] = 123456
# it should fail
with pytest.raises(Invalid) as excinfo:
validate(schema, correct_message, verbose)
# check with different data type
correct_message[attribute] = []
# it should fail
with pytest.raises(Invalid) as excinfo:
validate(schema, correct_message, verbose)
| 30.956835 | 100 | 0.69928 |
5eb7d024eaca9e157b0e84b7d1b36c50c36ad9af | 4,029 | py | Python | tests/terminal.py | randy3k/ride | 8a052daebaa8f03a9fff95eb38d45a32ac43bed9 | ["MIT"] | 1,051 | 2018-12-20T19:35:43.000Z | 2022-03-31T19:44:35.000Z | tests/terminal.py | randy3k/ride | 8a052daebaa8f03a9fff95eb38d45a32ac43bed9 | ["MIT"] | 255 | 2018-12-19T13:51:01.000Z | 2022-03-31T09:33:43.000Z | tests/terminal.py | randy3k/ride | 8a052daebaa8f03a9fff95eb38d45a32ac43bed9 | ["MIT"] | 54 | 2019-03-13T02:25:31.000Z | 2022-03-15T16:21:50.000Z |
import sys
import pyte
import operator
import threading
from contextlib import contextmanager
import time
import os
if sys.platform.startswith("win"):
import winpty
else:
import ptyprocess
__all__ = ["PtyProcess", "Screen", "ByteStream", "Terminal"]
if sys.platform.startswith("win"):
ParentPtyProcess = winpty.PtyProcess
else:
ParentPtyProcess = ptyprocess.PtyProcess
class PtyProcess(ParentPtyProcess):
def read(self, nbytes):
if sys.platform.startswith("win"):
return super(PtyProcess, self).read(nbytes).encode("utf-8")
else:
return super(PtyProcess, self).read(nbytes)
def write(self, data):
if sys.platform.startswith("win"):
super(PtyProcess, self).write(data.decode("utf-8"))
else:
super(PtyProcess, self).write(data)
class Screen(pyte.Screen):
def __init__(self, process, *args, **kwargs):
self._process = process
super(Screen, self).__init__(*args, **kwargs)
def write_process_input(self, data):
self._process.write(data.encode("utf-8"))
class ByteStream(pyte.ByteStream):
def start_feeding(self):
screen = self.listener
process = screen._process
def reader():
while True:
try:
data = process.read(1024)
except EOFError:
break
if data:
self.feed(data)
t = threading.Thread(target=reader)
t.start()
class Var(object):
def __init__(self, getter):
self.getter = getter
def __getattr__(self, name):
# fallback methods
def _(*args, **kwargs):
return Var(lambda: getattr(self.getter(), name)(*args, **kwargs))
return _
def observe(self, *args, **kwargs):
return self.getter(*args, **kwargs)
def _assert(self, op, operand, timeout=5):
t = time.time()
while time.time() - t < timeout:
value = self.getter()
if op(value, operand):
break
time.sleep(0.05)
else:
raise Exception("value is {}".format(value))
def assert_startswith(self, operand, timeout=5):
self._assert(str.startswith, operand, timeout)
def assert_endswith(self, operand, timeout=5):
self._assert(str.endswith, operand, timeout)
def assert_equal(self, operand, timeout=5):
self._assert(operator.eq, operand, timeout)
def assert_contains(self, operand, timeout=5):
self._assert(operator.contains, operand, timeout)
class Terminal(object):
def __init__(self, process, screen, stream):
self.process = process
self.screen = screen
self.stream = stream
@classmethod
@contextmanager
def open(cls, cmd):
# github actions windows-2019 doesn't like (24, 80)
env = os.environ.copy()
env["RETICULATE_PYTHON"] = sys.executable
        # do not prompt to install miniconda
env["RETICULATE_MINICONDA_ENABLED"] = "0"
process = PtyProcess.spawn(cmd, dimensions=(40, 120), env=env)
screen = Screen(process, 120, 40)
stream = ByteStream(screen)
stream.start_feeding()
try:
yield cls(process, screen, stream)
finally:
process.terminate(force=True)
def sendintr(self):
self.process.sendintr()
def isalive(self):
return self.process.isalive()
def write(self, x):
self.process.write(x.encode('utf-8'))
def _line(self, num=0):
# parent's `line` method
return self.screen.display[num]
def line(self, num=0):
return Var(lambda: self._line(num))
def cursor(self):
return Var(lambda: (self.screen.cursor.x, self.screen.cursor.y))
def current_line(self):
return Var(lambda: self._line(self.screen.cursor.y))
def previous_line(self, num=1):
return Var(lambda: self._line(self.screen.cursor.y - num))
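# Illustrative usage sketch (the command and expected prompt below are placeholders,
# not part of this module):
#   with Terminal.open(["some-repl"]) as terminal:
#       terminal.current_line().assert_startswith(">")   # wait for the prompt
#       terminal.write("1 + 1\n")
#       terminal.previous_line(2).assert_contains("2")   # output appears above the new prompt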
| 26.682119 | 77 | 0.607099 |
e699c97c0b37da3099acfdc7d66113cacbc9976c | 24,425 | py | Python | dependencies.py | Plinius-Audio/ohdevtools | 1a094d5ae918394f1307617fa6594d0bcc3005c2 | ["BSD-2-Clause-FreeBSD"] | 9 | 2015-12-30T10:53:51.000Z | 2021-11-11T00:10:02.000Z | dependencies.py | Plinius-Audio/ohdevtools | 1a094d5ae918394f1307617fa6594d0bcc3005c2 | ["BSD-2-Clause-FreeBSD"] | 2 | 2018-05-31T10:46:58.000Z | 2022-02-13T22:43:28.000Z | dependencies.py | Plinius-Audio/ohdevtools | 1a094d5ae918394f1307617fa6594d0bcc3005c2 | ["BSD-2-Clause-FreeBSD"] | 4 | 2020-09-25T22:39:22.000Z | 2021-11-08T21:11:16.000Z |
import os
import tarfile
import zipfile
import re
import requests
import platform
import subprocess
import json
import shutil
import io
import tempfile
from default_platform import default_platform
import deps_cross_checker
import aws
# Master table of dependency types.
# A dependency definition can specify 'type' to inherit definitions from one of these.
# String values can depend on other string values from the dependency. For example,
# if 'name' is defined as 'Example' then '${name}.exe' will expand to 'Example.exe'.
# It does not matter which order the values are defined.
# String values can also depend on boolean values. For example, the string
# '${test-value?yes-result:no-result}' will get the value of the string named
# 'yes-result' if 'test-value' is a true boolean value, and the string named
# 'no-result' if 'test-value' is a false boolean value.
# Finally, string values can also depend on a lookup table defined as a JSON object.
# For example, given these definitions:
# {
# "servertable":{
# "Windows":"windows.openhome.org",
# "Linux":"linux.openhome.org",
# "*":"openhome.org"
# },
# "server":"${servertable[$system]}"
# }
# If 'system' is defined as 'Windows', then 'server' will be defined as
# 'windows.openhome.org'. The '*' entry is the default: if a lookup fails the default
# will be used instead.
# The principal string values that must be defined are 'archive-path' to point to the
# .tar.gz file with the dependency's binaries, 'dest' to specify where to untar it,
# and 'configure-args' to specify the list of arguments to pass to waf.
# In order for source control fetching to work, the string 'source-git' should point
# to the git repo and 'tag' should identify the git tag that corresponds to the
# fetched binaries.
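# Illustrative expansion example (hypothetical dependency, not one of the real definitions below):
#   {"name": "Example", "platform-specific": True,
#    "archive-platform": "${platform-specific?platform:any-platform}"}
# With platform set to "Windows-x86", 'archive-platform' expands to "Windows-x86";
# with "platform-specific": False it would expand to the value of 'any-platform' instead.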
DEPENDENCY_TYPES = {
# Ignore dependencies
# - ignored - effectively 'comments' out entire dependency
'ignore': {
'ignore': True
},
# Openhome dependencies
# - (legacy name - basically means that they are publicly visible and available)
# - generally have an associated git repo to allow us to fetch source code.
# - stored on AWS in the linn-artifacts-public bucket
#
# At a minimum must define:
# name
# version
'openhome': {
'archive-extension': '.tar.gz',
'archive-prefix': '',
'archive-suffix': '',
'binary-repo': 's3://linn-artifacts-public/artifacts',
'archive-directory': '${binary-repo}/${name}/',
'archive-filename': '${archive-prefix}${name}-${version}-${archive-platform}${archive-suffix}${archive-extension}',
'remote-archive-path': '${archive-directory}${archive-filename}',
'use-local-archive': False,
'archive-path': '${use-local-archive?local-archive-path:remote-archive-path}',
'source-path': '${linn-git-user}@core.linn.co.uk:/home/git',
'repo-name': '${name}',
'source-git': '${source-path}/${repo-name}.git',
'tag': '${repo-name}_${version}',
'any-platform': 'AnyPlatform',
'platform-specific': True,
'host-platform': default_platform(),
'archive-platform': '${platform-specific?platform:any-platform}',
'dest': 'dependencies/${archive-platform}/',
'configure-args': []
},
# Internal dependencies
# - ony visible and available inside Linn
# - stored on AWS in the linn-artifacts-private bucket
#
# At a minimum must define:
# name
# version
'internal': {
'binary-repo': 's3://linn-artifacts-private',
'source-git': None,
'any-platform': 'AnyPlatform',
'platform-specific': True,
'archive-suffix': '',
'archive-filename': '${name}-${version}-${platform}${archive-suffix}.tar.gz',
'archive-platform': '${platform-specific?platform:any-platform}',
'archive-path': '${binary-repo}/${name}/${archive-filename}',
'host-platform': default_platform(),
'dest': 'dependencies/${archive-platform}/',
'configure-args': []
},
# External dependencies
#
# - publicly visible and available
# - no git repo that conforms to 'openhome standard'
# - stored on AWS in the linn-artifacts-public bucket
#
# At a minimum must define:
# name
# archive-filename
'external': {
'binary-repo': 's3://linn-artifacts-public/artifacts',
'source-git': None,
'any-platform': 'AnyPlatform',
'platform-specific': True,
'archive-platform': '${platform-specific?platform:any-platform}',
'archive-path': '${binary-repo}/${archive-platform}/${archive-filename}',
'host-platform': default_platform(),
'dest': 'dependencies/${archive-platform}/',
'configure-args': []
},
}
class FileFetcher(object):
def __init__(self):
pass
def fetch(self, path):
if path.startswith("file:") or path.startswith("smb:"):
raise Exception("FETCH: File URLs deprecated")
elif path.startswith("s3:"):
return self.fetch_aws(path)
elif re.match(r"[^\W\d]{2,8}:", path):
raise Exception("FETCH: Legacy URLs no longer re-routed")
return self.fetch_local(path)
@staticmethod
def fetch_aws(awspath):
print(' from AWS %s' % awspath)
temppath = tempfile.mktemp( suffix='.tmp' )
try:
aws.copy(awspath, temppath)
return temppath
except:
raise Exception("FETCH: Unable to retrieve %s from AWS" % awspath)
return None
@staticmethod
def fetch_local(path):
print( ' from LOCAL PATH %s' % path)
return path
class EnvironmentExpander(object):
# template_regex matches
template_regex = re.compile(r"""
(?x) # Enable whitespace and comments
(?P<dollar>\$\$)| # Match $$
(?P<word>\$[a-zA-Z_][a-zA-Z_0-9]*)| # Match $word
(?P<parens>\$\{[^}]*\}) # Match ${any-thing}
""")
# Matches foo[bar]
index_regex = re.compile(r"""
(?x) # Enable whitespace and comments
^ # Match only at start of string
([^][]*) # Match table name (no brackets allowed)
\[ # Match one open bracket: [
([^][]*) # Match key (no brackets allowed)
\] # Match one close bracket: ]
$
""")
def __init__(self, env_dict):
self.env_dict = env_dict
self.cache = {}
self.expandset = set()
def __getitem__(self, key):
return self.expand(key)
def getraw(self, key):
return self.env_dict[key]
def __contains__(self, key):
return key in self.env_dict
def keys(self):
return self.env_dict.keys()
def values(self):
return [self.expand(key) for key in self.keys()]
def items(self):
return [(key, self.expand(key)) for key in self.keys()]
def expand(self, key):
if key in self.cache:
return self.cache[key]
if key in self.expandset:
raise ValueError("Recursive expansion for key:", key)
self.expandset.add(key)
result = self._expand(key)
self.cache[key] = result
self.expandset.remove(key)
return result
def _expand(self, key):
if key not in self.env_dict:
raise KeyError("Key undefined:", key)
value = self.env_dict[key]
return self._expandvalue(value)
def _expandvalue(self, value):
if isinstance(value, ("".__class__, u"".__class__)):
return self.expandstring(value)
# return self.template_regex.sub(self.replacematch, value)
elif isinstance(value, (list, tuple)):
return [self._expandvalue(x) for x in value]
elif isinstance(value, dict):
return dict((k, self._expandvalue(v)) for (k, v) in value.items())
return value
def expandstring(self, value):
firstmatch = self.template_regex.match(value)
if firstmatch is not None and firstmatch.group(0) == value and value != "$$":
# Special case: The entire string is a single expansion. In this case,
# we allow the expansion to be *anything* (bool, int, list...),
# not just a string.
return self.replacematch(firstmatch)
return self.template_regex.sub(self.replacematch, value)
def replacematch(self, match):
if match.group('dollar'):
return '$'
key = None
if match.group('word'):
key = match.group('word')[1:]
if match.group('parens'):
key = match.group('parens')[2:-1]
assert key is not None
key = key.strip()
if '[' in key:
return self.expandlookup(key)
if '?' in key:
return self.expandconditional(key)
return self.expand(key)
def expandlookup(self, key):
match = self.index_regex.match(key)
if match is None:
raise ValueError('lookup must be of form ${table[key]}')
tablename = match.group(1).strip()
keyname = match.group(2).strip()
table = self.expand(tablename)
if keyname.startswith('$'):
key = self.expand(keyname[1:])
else:
key = keyname
if not isinstance(table, dict):
raise ValueError("lookup table must expand to a JSON object (got {0!r} instead)".format(table))
if not isinstance(key, ("".__class__, u"".__class__)):
raise ValueError("lookup index must expand to a JSON string (got {0!r} instead)".format(key))
if key not in table:
if '*' in table:
return table['*']
raise KeyError("Key not in table, and no default '*' entry found: key={0!r}\ntable={1!r}".format(key, table))
return table[key]
def expandconditional(self, key):
if '?' not in key:
raise ValueError('conditional must be of form ${condition?result:alternative}')
condition, rest = key.split('?', 1)
if ':' not in rest:
raise ValueError('conditional must be of form ${condition?result:alternative}')
primary, alternative = rest.split(':', 1)
condition, primary, alternative = [x.strip() for x in [condition, primary, alternative]]
try:
conditionvalue = self.expand(condition)
except KeyError:
conditionvalue = False
if self.is_trueish(conditionvalue):
return self.expand(primary)
return self.expand(alternative)
@staticmethod
def is_trueish(value):
if hasattr(value, "upper"):
value = value.upper()
return value in [1, "1", "YES", "Y", "TRUE", "ON", True]
class Dependency(object):
def __init__(self, name, environment, fetcher, has_overrides=False):
self.expander = EnvironmentExpander(environment)
self.has_overrides = has_overrides
self.fetcher = fetcher
def fetch(self):
remote_path = self.expander.expand('archive-path')
local_path = os.path.abspath(self.expander.expand('dest'))
fetched_path = None
print("\nFetching '%s'" % self.name)
try:
fetched_path = self.fetcher.fetch(remote_path)
statinfo = os.stat(fetched_path)
if not statinfo.st_size:
os.unlink(fetched_path)
print(" **** WARNING - failed to fetch %s ****" % os.path.basename(remote_path))
return False
except IOError:
print(" **** FAILED ****")
return False
try:
os.makedirs(local_path)
except OSError:
# We get an error if the directory exists, which we are happy to
# ignore. If something worse went wrong, we will find out very
# soon when we try to extract the files.
pass
print(" unpacking to '%s'" % (local_path,))
if os.path.splitext(remote_path)[1].upper() in ['.ZIP', '.NUPKG', '.JAR']:
self.unzip(fetched_path, local_path)
else:
self.untar(fetched_path, local_path)
if fetched_path:
if fetched_path != remote_path:
os.unlink(fetched_path)
print("OK")
return True
@property
def name(self):
return self['name']
def __getitem__(self, key):
return self.expander.expand(key)
def __contains__(self, key):
return key in self.expander
def items(self):
return self.expander.items()
def checkout(self):
name = self['name']
sourcegit = self['source-git']
if sourcegit is None:
print('No git repo defined for {0}'.format(name))
return False
print("Fetching source for '%s'\n into '%s'" % (name, os.path.abspath('../' + name)))
tag = self['tag']
try:
if not os.path.exists('../' + name):
print(' git clone {0} {1}'.format(sourcegit, name))
subprocess.check_call(['git', 'clone', sourcegit, name], cwd='..', shell=False)
elif not os.path.isdir('../' + name):
print('Cannot checkout {0}, because directory ../{0} already exists'.format(name))
return False
else:
print(' git fetch origin')
subprocess.check_call(['git', 'fetch', 'origin'], cwd='../' + name, shell=False)
print(" git checkout {0}".format(tag))
subprocess.check_call(['git', 'checkout', tag], cwd='../' + name, shell=False)
except subprocess.CalledProcessError as cpe:
print(str(cpe))
return False
return True
@staticmethod
def untar(source, dest):
tf = tarfile.open(source, 'r')
for f in tf:
try:
tf.extract(f.name, path=dest)
except IOError:
os.unlink( os.path.join(dest, f.name ))
tf.extract(f.name, path=dest)
tf.close()
@staticmethod
def unzip(source, dest):
zf = zipfile.ZipFile(source, mode='r')
zf.extractall(path=dest)
zf.close()
def expand_remote_path(self):
return self.expander.expand('archive-path')
def expand_local_path(self):
return self.expander.expand('dest')
def expand_configure_args(self):
return self.expander.expand('configure-args')
class DependencyCollection(object):
def __init__(self, env):
fetcher = FileFetcher()
self.base_env = env
self.dependency_types = DEPENDENCY_TYPES
self.dependencies = {}
self.fetcher = fetcher
def create_dependency(self, dependency_definition, overrides={}):
defn = dependency_definition
env = {}
env.update(self.base_env)
if 'type' in defn:
dep_type = defn['type']
env.update(self.dependency_types[dep_type])
else:
# default to an 'external' dependency type if none specified
dep_type = 'external'
env.update(self.dependency_types[dep_type])
env.update(defn)
env.update(overrides)
if 'name' not in env:
raise ValueError('Dependency definition contains no name')
name = env['name']
new_dependency = Dependency(name, env, self.fetcher, has_overrides=len(overrides) > 0)
if 'ignore' in new_dependency and new_dependency['ignore']:
return
self.dependencies[name] = new_dependency
def __contains__(self, key):
return key in self.dependencies
def __getitem__(self, key):
return self.dependencies[key]
def items(self):
return self.dependencies.items()
def _filter(self, subset=None):
if subset is None:
return self.dependencies.values()
missing_dependencies = [name for name in subset if name not in self.dependencies]
if len(missing_dependencies) > 0:
raise Exception("No entries in dependency file named: " + ", ".join(missing_dependencies) + ".")
return [self.dependencies[name] for name in subset]
def get_args(self, subset=None):
dependencies = self._filter(subset)
configure_args = sum((d.expand_configure_args() for d in dependencies), [])
return configure_args
def fetch(self, subset=None):
dependencies = self._filter(subset)
failed_dependencies = []
filename = self.fetched_deps_filename(dependencies)
fetched_deps = self.load_fetched_deps(filename)
for d in dependencies:
do_fetch = True
name = ''
path = ''
dest = ''
if 'name' in d.expander:
name = d.expander.expand('name')
if 'archive-path' in d.expander:
path = d.expander.expand('archive-path')
if 'dest' in d.expander:
dest = d.expander.expand('dest')
lookup = dest.rstrip( '/' ) + '/' + name
version = os.path.basename(path)
if lookup in fetched_deps:
if fetched_deps[lookup] == version:
print("Skipping fetch of %s as unchanged (%s)" % (name, version))
do_fetch = False
if do_fetch:
if not d.fetch():
failed_dependencies.append(d.name)
else:
fetched_deps[lookup] = version
if filename:
self.save_fetched_deps(filename, fetched_deps)
if failed_dependencies:
print("Failed to fetch some dependencies: " + ' '.join(failed_dependencies))
return False
return True
@staticmethod
def fetched_deps_filename(deps):
filename = None
for d in deps:
if 'dest' in d.expander:
filename = os.path.join(d.expander.expand('dest').split('/')[0], 'loadedDeps.json')
break
return filename
def load_fetched_deps(self, filename):
loaded_deps = {}
if filename and os.path.isfile(filename):
try:
f = open(filename, 'rt')
loaded_deps = json.load(f)
f.close()
except:
print("Error with current fetched dependency file: %s" % filename)
return loaded_deps
@staticmethod
def save_fetched_deps(filename, deps):
f = open(filename, 'wt')
json.dump(deps, f)
f.close()
def checkout(self, subset=None):
dependencies = self._filter(subset)
failed_dependencies = []
for d in dependencies:
if not d.checkout():
failed_dependencies.append(d.name)
if failed_dependencies:
print("Failed to check out some dependencies: " + ' '.join(failed_dependencies))
return False
return True
def read_json_dependencies(dependencyfile, overridefile, env):
collection = DependencyCollection(env)
dependencies = json.load(dependencyfile)
overrides = json.load(overridefile)
overrides_by_name = dict((dep['name'], dep) for dep in overrides)
for d in dependencies:
name = d['name']
override = overrides_by_name.get(name, {})
collection.create_dependency(d, override)
return collection
def read_json_dependencies_from_filename(dependencies_filename, overrides_filename, env):
try:
dependencyfile = open(dependencies_filename, "r")
with open(dependencies_filename) as dependencyfile:
if overrides_filename is not None and os.path.isfile(overrides_filename):
with open(overrides_filename) as overridesfile:
return read_json_dependencies(dependencyfile, overridesfile, env)
else:
return read_json_dependencies(dependencyfile, io.StringIO(u'[]'), env)
except (OSError, IOError) as e:
if e.errno != 2:
raise
return DependencyCollection(env)
def clean_dirs(dir):
"""Remove the specified directory tree - don't remove anything if it would fail"""
if os.path.isdir( dir ):
locked = []
for dirName, _subdirList, fileList in os.walk(dir):
for fileName in fileList:
filePath = os.path.join(dirName, fileName)
try:
if not os.path.islink( filePath ):
openAtt = 'r'
if platform.system().lower() == 'windows':
openAtt = 'a'
f = open(filePath, openAtt)
f.close()
except:
locked.append(filePath)
if locked:
for f in locked:
print('Locked file:- ', f)
raise Exception('Failed to clean dependencies\n')
else:
shutil.rmtree(dir)
def fetch_dependencies(dependency_names=None, platform=None, env=None, fetch=True, clean=True, source=False, list_details=False, local_overrides=True, verbose=False):
'''
Fetch all the dependencies defined in projectdata/dependencies.json and in
projectdata/packages.config.
platform:
Name of target platform. E.g. 'Windows-x86', 'Linux-x64', 'Mac-x64'...
env:
Extra variables referenced by the dependencies file.
fetch:
True to fetch the listed dependencies, False to skip.
clean:
True to clean out directories before fetching, False to skip.
source:
True to fetch source for the listed dependencies, False to skip.
'''
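    # Example call (illustrative; 'ohNet' is a hypothetical name - real names come from
    # projectdata/dependencies.json):
    #   fetch_dependencies(['ohNet'], platform='Linux-x64', clean=True)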
if env is None:
env = {}
if platform is not None:
env['platform'] = None
fName = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'platforms.txt')
f = open(fName, 'rt')
supported = f.readlines()
f.close()
for entry in supported:
if platform in entry:
env['platform'] = platform
if not env['platform']:
raise Exception('Platform not supported (%s) - see %s for list of supported platforms' % (platform, fName))
if 'platform' not in env:
platform = env['platform'] = default_platform()
if '-' in platform:
env['system'], env['architecture'] = platform.split('-', 2)
if platform is None:
raise Exception('Platform not specified and unable to guess.')
if clean and not list_details:
try:
os.unlink('dependencies/loadedDeps.json')
except:
pass
clean_dirs('dependencies')
overrides_filename = '../dependency_overrides.json' if local_overrides else None
dependencies = read_json_dependencies_from_filename('projectdata/dependencies.json', overrides_filename, env=env)
if list_details:
for name, dependency in dependencies.items():
print("Dependency '{0}':".format(name))
print(" fetches from: {0!r}".format(dependency['archive-path']))
print(" unpacks to: {0!r}".format(dependency['dest']))
print(" local override: {0}".format("YES (see '../dependency_overrides.json')" if dependency.has_overrides else 'no'))
if verbose:
print(" all keys:")
for key, value in sorted(dependency.items()):
print(" {0} = {1!r}".format(key, value))
print("")
else:
if fetch:
if not dependencies.fetch(dependency_names):
raise Exception("Failed to load requested dependencies")
if source:
dependencies.checkout(dependency_names)
# Finally perform cross-check of (major.minor) dependency versions to ensure that these are in sync
# across this (current) repo and all its pulled-in dependencies. Done as totally seperate operation
# to isolate from the main fetcher code to assist with any future maintenance
if not clean:
xcheck = deps_cross_checker.DepsCrossChecker( platform )
result = xcheck.execute()
if result != 0:
raise Exception( 'Failed: dependency cross-checker detected problem(s)' )
return dependencies
| 37.233232 | 166 | 0.590829 |
5b5f4d0da457327f84ba17ab73ca81baf0b9d8ed | 244 | py | Python | 2a.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | ["BSD-3-Clause"] | null | null | null | 2a.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | ["BSD-3-Clause"] | null | null | null | 2a.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
data_input = np.loadtxt('2a_data.txt')
mysum = 0
minimums = np.min(data_input, axis=1)
maximums = np.max(data_input, axis=1)
diffs = maximums - minimums
print(np.sum(diffs))
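# Illustrative check: for rows [[5, 1, 9, 5], [7, 5, 3, 3], [2, 4, 6, 8]] the row
# differences are 8, 4 and 6, so this script would print 18.0.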
| 17.428571 | 38 | 0.688525 |
6ebb8f194bd9625cf0e55656f6b226c6c6453097 | 428 | py | Python | python_stock/4/Python4-4.py | hollo08/stockstrategy | 09ece2457d653439a8ace80a6ac7dd4da9813846 | [
"MIT"
] | 1 | 2020-09-18T15:08:46.000Z | 2020-09-18T15:08:46.000Z | python_stock/4/Python4-4.py | hollo08/stockstrategy | 09ece2457d653439a8ace80a6ac7dd4da9813846 | [
"MIT"
] | null | null | null | python_stock/4/Python4-4.py | hollo08/stockstrategy | 09ece2457d653439a8ace80a6ac7dd4da9813846 | [
"MIT"
] | 2 | 2022-01-23T03:26:22.000Z | 2022-03-28T16:21:01.000Z | import random  # import the random standard library
mymin = 200  # variable holding the smallest random number seen so far
i = 1  # variable counting the loop iterations
while i <= 15:
    r = random.randint(50, 150)  # generate a random number between 50 and 150
    i += 1  # add 1 to the iteration count
    print("Random number %d is: %s " % (i - 1, r))  # show which random number this is
    if r < mymin:
        mymin = r  # keep the smallest random number seen so far in mymin
    else:
        print("\n\nThe smallest of these 15 numbers is:", mymin)
| 35.666667 | 62 | 0.448598 |
53e3ffd4ac72b5ca13c1799d35991a28a5e0e78c | 2,596 | py | Python | plugins/trendmicro_deepsecurity/icon_trendmicro_deepsecurity/actions/list_rules/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/trendmicro_deepsecurity/icon_trendmicro_deepsecurity/actions/list_rules/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/trendmicro_deepsecurity/icon_trendmicro_deepsecurity/actions/list_rules/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import komand
from .schema import ListRulesInput, ListRulesOutput, Input, Output, Component
# Custom imports below
from icon_trendmicro_deepsecurity.util.shared import tryJSON
from icon_trendmicro_deepsecurity.util.shared import checkResponse
import requests
class ListRules(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="list_rules",
description=Component.DESCRIPTION,
input=ListRulesInput(),
output=ListRulesOutput(),
)
def run(self, params={}):
"""
List IPS rules
"""
# Get parameters
self.scope = params.get(Input.SCOPE)
self.id = params.get(Input.ID)
ips_rules = set()
covered_cves = set()
hits = 0
self.logger.info(f"Getting rules from {self.scope} {self.id}")
# Prepare request
# Check if the rules should be assigned to a computer or policy
if self.scope == "computer":
url = f"{self.connection.dsm_url}/api/computers/{self.id}/intrusionprevention/rules"
else:
url = f"{self.connection.dsm_url}/api/policies/{self.id}/intrusionprevention/rules"
# Send request
response = requests.get(url, verify=self.connection.dsm_verify_ssl, headers=self.connection.headers)
self.logger.info(f"url: {response.url}")
self.logger.info(f"status: {response.status_code}")
self.logger.info(f"reason: {response.reason}")
# Check response errors
checkResponse(response)
# Try to convert the response data to JSON
response_data = tryJSON(response)
# Extract rules
if response_data["intrusionPreventionRules"]:
for rule in response_data["intrusionPreventionRules"]:
ips_rules.add(rule["ID"])
if "CVE" in rule.keys():
self.logger.info(f"{rule['ID']}:\t{rule['name']} - " + ", ".join(rule["CVE"]))
covered_cves.update(rule["CVE"])
else:
self.logger.info(f"{rule['ID']}:\t{rule['name']}")
else:
self.logger.info("No rules found!")
hits = len(response_data["intrusionPreventionRules"])
self.logger.info(f"Found {hits} rules covering the following CVEs: \n" + ", ".join(covered_cves))
# Return assigned rules and covered CVEs
return {
Output.RULES_ASSIGNED: list(ips_rules),
Output.COVERED_CVES: list(covered_cves),
Output.RESPONSE_JSON: response_data,
}
| 34.157895 | 108 | 0.609399 |
04a6bf57031b07c8ce399db8dfd5c10266bcf5ac | 4,162 | py | Python | zdata.py | manimaul/mxmcc | 923458b759c8daa74dd969e968bc72b17fdffe02 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2016-08-24T21:30:45.000Z | 2016-08-24T21:30:45.000Z | zdata.py | manimaul/mxmcc | 923458b759c8daa74dd969e968bc72b17fdffe02 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 5 | 2021-03-18T23:25:15.000Z | 2022-03-11T23:44:20.000Z | zdata.py | manimaul/mxmcc | 923458b759c8daa74dd969e968bc72b17fdffe02 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Will Kamp'
__copyright__ = 'Copyright 2013, Matrix Mariner Inc.'
__license__ = 'BSD'
__email__ = 'will@mxmariner.com'
__status__ = 'Development' # 'Prototype', 'Development', or 'Production'
'''MX Mariner zdata generator for regions / catalogs'''
import codecs
import os.path
import zipfile
from . import config
from . import catalog
from . import regions
upd_fmt = U'UPDATE regions SET installeddate=\'%s\' WHERE name=\'%s\';\n'
custom_fmt0 = u'DELETE from regions WHERE name=\'%s\';\n'
custom_fmt1 = u'INSERT into [regions] ([name], [description], [image], [size], [installeddate] ) ' \
u'VALUES (\'%s\', \'%s\', \'%s\', \'%s\', \'%s\');\n'
fmt0 = u'DELETE from charts where region=\'%s\';\n'
fmt1 = u'INSERT INTO [charts] ([region], [file], [name], [updated], [scale], [outline], [depths], [zoom]) ' \
u'VALUES (\'%s\', \'%s\', \'%s\', \'%s\', %s, \'%s\', \'%s\', \'%s\');\n'
def get_zdat_epoch(zdat_path):
"""
:param zdat_path: path to the <region>.zdat file
:return: the installeddate value to be set
"""
zdat_file = zipfile.ZipFile(zdat_path, 'r', zipfile.ZIP_DEFLATED)
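    # The second line of the bundled SQL holds the UPDATE statement; the value
    # between its first pair of single quotes is the installeddate epoch.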
line = str(zdat_file.open(zdat_file.namelist()[0], 'r').readlines()[1])
l = line.find('\'') + 1
r = line.find('\'', l)
return line[l:r]
def generate_update():
    """generates an UPDATE.zdat file for all of the new (s)gemf regions rendered
"""
sql_fname = 'UPDATE.sql'
sql_path = os.path.join(config.compiled_dir, sql_fname)
zdat_path = os.path.join(config.compiled_dir, 'UPDATE.zdat')
print(zdat_path)
zdat = zipfile.ZipFile(zdat_path, 'w', zipfile.ZIP_DEFLATED)
sqlf = open(sql_path, 'w')
gemf_lst = []
for ea in os.listdir(config.compiled_dir):
if ea.endswith('gemf'):
gemf_lst.append(os.path.join(config.compiled_dir, ea))
gemf_lst.sort()
    if len(gemf_lst) == 0:
return
sqlstr = u'update regions set latestdate=\'%s\', size=\'%s\' where name=\'%s\';'
sqlf.write(u'--MXMARINER-DBVERSION:1\n')
for p in gemf_lst:
size = str(os.path.getsize(p))
region = os.path.basename(p)
region = region[:region.rfind('.')]
z_path = os.path.join(config.compiled_dir, region + '.zdat')
sqlf.write(sqlstr % (get_zdat_epoch(z_path), size, region) + '\n')
sqlf.close()
zdat.write(sql_path, sql_fname)
os.remove(sql_path)
zdat.close()
print('update written to: ' + zdat_path)
def format_entry(region: str, entry: dict):
def san(thing):
return str(thing).strip()
return fmt1 % (region, os.path.basename(san(entry['path'])), san(entry['name']), san(entry['date']),
san(entry['scale']), san(entry['outline']), san(entry['depths']),
san(entry['max_zoom']))
def generate_zdat_for_catalog(catalog_name, description=None):
"""generates a zdat file for a region
catalog_name - the name of the catalog / region to generate data for
description - if this is a custom catalog / region... set the description here
"""
region = catalog_name.upper().strip()
reader = catalog.get_reader_for_region(catalog_name)
sql_fname = region + '.sql'
sql_path = os.path.join(config.compiled_dir, sql_fname)
zdat_path = os.path.join(config.compiled_dir, region + '.zdat')
sql_file = codecs.open(sql_path, 'w', 'utf-8')
zdat_file = zipfile.ZipFile(zdat_path, 'w', zipfile.ZIP_DEFLATED)
sql_file.write('--MXMARINER-DBVERSION:3\n')
if regions.is_valid_region(region):
sql_file.write(upd_fmt % (config.epoch, region))
sql_file.write(fmt0 % region)
else:
num_bytes = os.path.getsize(os.path.join(config.compiled_dir, region + '.gemf'))
sql_file.write(custom_fmt0 % region)
sql_file.write(custom_fmt1 % (region, description, region.lower().replace('_', ''), num_bytes, config.epoch))
for entry in reader:
sql_file.write(format_entry(region, entry))
sql_file.close()
zdat_file.write(sql_path, sql_fname)
os.remove(sql_path)
zdat_file.close()
if __name__ == '__main__':
generate_update()
| 34.97479 | 117 | 0.637434 |
013df6455e514ab341995e60c2b29691d795cfc3 | 557 | py | Python | apps/run_yolo_video.py | wenxingliu/smoke_detector_yolo3 | 2e67a4347256ad8378eddf5b4efdc3782b3fb8e2 | [
"MIT"
] | null | null | null | apps/run_yolo_video.py | wenxingliu/smoke_detector_yolo3 | 2e67a4347256ad8378eddf5b4efdc3782b3fb8e2 | [
"MIT"
] | null | null | null | apps/run_yolo_video.py | wenxingliu/smoke_detector_yolo3 | 2e67a4347256ad8378eddf5b4efdc3782b3fb8e2 | [
"MIT"
] | 1 | 2020-10-10T04:03:30.000Z | 2020-10-10T04:03:30.000Z | import os
current_path = os.path.dirname(os.path.abspath(__file__))
path_suffix = 'apps'
if current_path.endswith(path_suffix):
parent_path = current_path.rsplit(path_suffix, 1)[0]
os.chdir(parent_path)
from yolo_detect.yolo import YOLO
from yolo_detect.detect_video import detect_video
__author__ = 'sliu'
if __name__ == '__main__':
video_file_name = '41琉璃河ch0_CHANNEL0_20180108_11_56_50'
video_path = 'input_data/videos/' + '%s.avi' % video_file_name
out_path = 'output_data/41_boxed/'
detect_video(YOLO(), video_path, out_path) | 32.764706 | 66 | 0.761221 |
29cc46030fd01b7fda5165b4f9e50553d78e9b7f | 895 | py | Python | share/qt/clean_mac_info_plist.py | aptcoin/aptcoin | bcdea0990837ea8c22017fe2e34548c5375cd476 | [
"MIT"
] | null | null | null | share/qt/clean_mac_info_plist.py | aptcoin/aptcoin | bcdea0990837ea8c22017fe2e34548c5375cd476 | [
"MIT"
] | null | null | null | share/qt/clean_mac_info_plist.py | aptcoin/aptcoin | bcdea0990837ea8c22017fe2e34548c5375cd476 | [
"MIT"
] | 2 | 2015-09-01T07:03:13.000Z | 2019-07-10T13:28:51.000Z | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Aptcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Aptcoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"aptcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist freshly created"
| 29.833333 | 109 | 0.72514 |
aa522183125fcf7395f4d7d9dfe47ffda2096f50 | 1,467 | py | Python | pychron/lasers/laser_managers/ilaser_manager.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/lasers/laser_managers/ilaser_manager.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/lasers/laser_managers/ilaser_manager.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pychron.iextraction_device import IExtractionDevice
# ============= standard library imports ========================
# ============= local library imports ==========================
class ILaserManager(IExtractionDevice):
def trace_path(self, *args, **kw):
pass
def drill_point(self, *args, **kw):
pass
def take_snapshot(self, *args, **kw):
pass
# def extract(self, *args, **kw):
# pass
# def end_extract(self, *args, **kw):
# pass
# def move_to_position(self, *args, **kw):
# pass
# ============= EOF =============================================
| 33.340909 | 81 | 0.542604 |
89329de5a7947e343c7794b1fc2dd2a0635f5ab9 | 2,489 | py | Python | image/resnet_train.py | riejohnson/gulf | a7cf688e263921e008b0117a274add011380c1c8 | [
"MIT"
] | 2 | 2020-11-13T21:42:45.000Z | 2021-04-09T04:25:05.000Z | image/resnet_train.py | riejohnson/gulf | a7cf688e263921e008b0117a274add011380c1c8 | [
"MIT"
] | null | null | null | image/resnet_train.py | riejohnson/gulf | a7cf688e263921e008b0117a274add011380c1c8 | [
"MIT"
] | 1 | 2021-09-23T12:12:48.000Z | 2021-09-23T12:12:48.000Z | import sys
import os
import numpy as np
import torch
import torch.nn.functional as F
from utils.utils import cast, data_parallel
from torch.backends import cudnn
from .resnet import resnet
from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_
from .data import dataset_attr, create_iterators_tddevtst
from .data import check_opt_for_create_iterators_tddevtst_ as check_opt_for_data_
from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index
cudnn.benchmark = True
#----------------------------------------------------------
def check_opt_(opt):
raise_if_absent(opt, [ 'seed','depth','k','dropout','ngpu','dataset','dtype'], who='resnet_train')
add_if_absent_(opt, ['csv_fn'], '')
#********************************************************************
def main(opt):
timeLog("resnet_train(opt) begins ...")
check_opt_(opt)
check_opt_for_data_(opt)
logging('Using %s ... ' % ('GPU(s)' if torch.cuda.is_available() else 'CPU'))
reset_logging(opt.csv_fn)
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
#--- prepare net
num_classes = dataset_attr(opt.dataset)['nclass']
def initialize_model():
return resnet(opt.depth, opt.k, num_classes, dropout=opt.dropout)
func, params = initialize_model()
#--- prepare data
do_pin_memory = torch.cuda.is_available()
rs = np.random.get_state()
train_loader, dev_loader, test_loader = create_iterators_tddevtst(opt, do_pin_memory)
np.random.set_state(rs)
test_dss = [ {'name':'dev', 'data':dev_loader}, {'name':'test', 'data':test_loader} ]
#--- training ...
loss_function = F.cross_entropy
def net(sample, is_train=False):
if sample is None:
return loss_function
inputs = cast(sample[0], opt.dtype)
output = data_parallel(func, inputs, params, is_train, list(range(opt.ngpu))).float()
return loss_function(output, cast(sample[Target_index], 'long')), output
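   # Either train the base model directly, or train with GULF: a second copy of
   # the network (i_net / i_params) is initialised from the base parameters and
   # used alongside the trainable parameters.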
if not is_gulf(opt):
train_base_model(opt, net, params, train_loader, test_dss)
else:
i_func, i_params = initialize_model()
copy_params(src=params, dst=i_params)
def i_net(sample):
is_train = False
inputs = cast(sample[0], opt.dtype)
return data_parallel(i_func, inputs, i_params, is_train, list(range(opt.ngpu))).float()
train_gulf_model(opt, i_net, i_params, net, params, train_loader, test_dss)
timeLog("resnet_train(opt) ends ...")
| 34.569444 | 101 | 0.671354 |
ab9d205cd781ed1c7a79734139d735a55ab9b1e4 | 1,582 | py | Python | pytelpoint/stats.py | MMTObservatory/pypoint | 644caf325192d9a3516f3e650078fe3b576b57d8 | [
"BSD-3-Clause"
] | 1 | 2021-11-12T00:05:57.000Z | 2021-11-12T00:05:57.000Z | pytelpoint/stats.py | MMTObservatory/pytpoint | 644caf325192d9a3516f3e650078fe3b576b57d8 | [
"BSD-3-Clause"
] | null | null | null | pytelpoint/stats.py | MMTObservatory/pytpoint | 644caf325192d9a3516f3e650078fe3b576b57d8 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import astropy.units as u
__all__ = ["skyrms", "psd"]
def skyrms(coo_ref, coo_meas):
"""
Calculate sky RMS of the offsets between reference and measured coordinates in the same way as TPOINT(tm).
Return the result in arcseconds.
Parameters
----------
coo_ref : `~astropy.coordinates.SkyCoord` instance
Reference coordinates
coo_meas : `~astropy.coordinates.SkyCoord` instance
Measured coordinates
Returns:
--------
rms : `~astropy.units.Quantity` (u.arcsec)
Root mean squared of the separation between coo_ref and coo_meas expressed in arcseconds.
"""
sep = coo_ref.separation(coo_meas)
rms = np.sqrt((sep ** 2).mean()).to(u.arcsec)
return rms
def psd(coo_ref, coo_meas, nterms=8):
"""
Calculate the population standard deviation, PSD, the way TPOINT(tm) does. Return the result in arcseconds.
Parameters
----------
coo_ref : `~astropy.coordinates.SkyCoord` instance
Reference coordinates
coo_meas : `~astropy.coordinates.SkyCoord` instance
Measured coordinates
nterms : int (default: 8)
Number of terms used in the model used to correct coo_meas to match coo_ref
Returns:
--------
sd : `~astropy.units.Quantity` (u.arcsec)
Population SD of the separation between coo_ref and coo_meas expressed in arcseconds.
"""
rms = skyrms(coo_ref, coo_meas)
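    # Same relation in formula form: PSD = RMS * sqrt(N / (N - nterms)),
    # where N is the number of measured points.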
sd = np.sqrt(rms**2 * len(coo_meas) / (len(coo_meas) - nterms))
return sd
| 29.849057 | 111 | 0.666245 |
00a55436b607691af95e1d7b58243295b8c5508f | 1,753 | py | Python | app/api/v2/models/products.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
] | null | null | null | app/api/v2/models/products.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
] | null | null | null | app/api/v2/models/products.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
] | 1 | 2018-11-04T18:09:38.000Z | 2018-11-04T18:09:38.000Z | """This module contains the data store
and data logic of the store's products
"""
from .. import database
class Products():
def __init__(self, product_id=None, product_name=None, product_price=None,
category=None, min_quantity=None, inventory=None, added_by=None):
self.product_name = product_name
self.product_price = product_price
self.category = category
self.product_id = product_id
self.min_quantity = min_quantity
self.inventory = inventory
self.added_by = added_by
def save(self):
query = """INSERT INTO products(product_name, product_price, category, min_quantity, inventory, added_by)
        VALUES('{}', {}, {}, {}, {}, {})""".format(self.product_name, self.product_price,
self.category, self.min_quantity, self.inventory, self.added_by)
database.insert_to_db(query)
def fetch_all_products(self):
"""Fetches all products from
the database
"""
query = """SELECT * FROM products"""
return database.select_from_db(query)
def put(self):
query = """UPDATE products SET product_price = {},
category = {}, inventory={}, min_quantity={} WHERE product_id = {}""".format(self.product_price,
self.category, self.inventory, self.min_quantity, self.product_id)
database.insert_to_db(query)
def delete(self):
query = """DELETE FROM products WHERE product_id = {}""".format(self.product_id)
database.insert_to_db(query)
def deduct_inventory(self):
query = """UPDATE products SET inventory = {} WHERE product_id = {}""".format(self.inventory, self.product_id)
database.insert_to_db(query) | 37.297872 | 122 | 0.642898 |
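# Usage sketch (illustrative values; assumes the database helpers are configured):
#   product = Products(product_name='Laptop', product_price=45000, category=1,
#                      min_quantity=5, inventory=20, added_by=1)
#   product.save()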
86ba3abde145f8a464ab64a502bcff4100865c72 | 465 | py | Python | IFR/configs/_base_/schedules/schedule_semi.py | jfzhuang/IFR | d6ffdd0c0810d7bb244f102ba8cc19c12f61e102 | [
"MIT"
] | 3 | 2022-03-09T13:15:15.000Z | 2022-03-21T06:59:10.000Z | IFR/configs/_base_/schedules/schedule_semi.py | jfzhuang/IFR | d6ffdd0c0810d7bb244f102ba8cc19c12f61e102 | [
"MIT"
] | null | null | null | IFR/configs/_base_/schedules/schedule_semi.py | jfzhuang/IFR | d6ffdd0c0810d7bb244f102ba8cc19c12f61e102 | [
"MIT"
] | null | null | null | # optimizer
optimizer = dict(
type='SGD',
lr=0.001,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(custom_keys={'decode_head': dict(lr_mult=10.0)}),
)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='SemiIterBasedRunner', max_iters=40000)
checkpoint_config = dict()
evaluation = dict(interval=1000, metric='mIoU', save_best='mIoU')
| 29.0625 | 72 | 0.716129 |
bccf3fe52a70ba15c775e369519724564fb59807 | 7,213 | py | Python | USB2SERIAL_RS485_Write/PC_Transmitter/Python 2.7.x/RS485_Write_2.7.py | xanthium-enterprises/Cross-Platform-RS485-Programming-Python | 8640be8fc6df9cefb80757a7ad5fe7e5b0c05c95 | [
"Apache-2.0"
] | 16 | 2016-07-15T00:34:06.000Z | 2021-12-11T13:49:41.000Z | USB2SERIAL_RS485_Write/PC_Transmitter/Python 2.7.x/RS485_Write_2.7.py | WikiBabu/Cross-Platform-RS485-Programming-Python | 8640be8fc6df9cefb80757a7ad5fe7e5b0c05c95 | [
"Apache-2.0"
] | null | null | null | USB2SERIAL_RS485_Write/PC_Transmitter/Python 2.7.x/RS485_Write_2.7.py | WikiBabu/Cross-Platform-RS485-Programming-Python | 8640be8fc6df9cefb80757a7ad5fe7e5b0c05c95 | [
"Apache-2.0"
] | 8 | 2018-02-23T09:27:28.000Z | 2020-01-04T13:22:19.000Z | #----------------------------------------------------------------------------------------------------#
# RS485 Communication using Python (Write) (hardware = USB2SERIAL) (Python 2.7.x) #
#----------------------------------------------------------------------------------------------------#
#Program runs on the PC side and transmits a character to the Serial Port @9600bps .Program uses #
#PySerial module to communicate with Serial Port of USB2SERIAL #
#----------------------------------------------------------------------------------------------------#
# BaudRate -> 9600 #
# Data formt -> 8 databits,No parity,1 Stop bit (8N1) #
# Flow Control -> None #
#----------------------------------------------------------------------------------------------------#
#====================================================================================================#
# www.xanthium.in #
# Copyright (C) 2015 Rahul.S #
#====================================================================================================#
#====================================================================================================#
# Interpreter/IDE : Python 2.7.x/IDLE #
# Module : PySerial # #
# OS : Windows(Windows 7)/Linux #
# Programmer : Rahul.S #
# Date : 31-March-2015 #
#====================================================================================================#
#====================================================================================================#
# Finding out the serial port number corresponding to your Computer #
#====================================================================================================#
# On Linux #
#----------------------------------------------------------------------------------------------------#
# USB2SERIAL will be detected as ttyUSB0 or ttyUSB1.You can check the port number of USB2SERIAL by #
# connecting you board to USB port and doing #
# dmesg | tail #
# and checking the output. #
#====================================================================================================#
#====================================================================================================#
# Running the Program #
#====================================================================================================#
# On Linux #
#----------------------------------------------------------------------------------------------------#
# Find out your serial port name and number corresponding to USB2SERIAL on your system.It will be- #
# -usually in the form of ttyUSB0 and ttyUSB1. #
# Open terminal and type the following command to invoke the 2.7.x interpreter                         #
# [user@linux:~$] sudo python RS485_Write.py #
# Give the password and then enter your portnumber when program asks ->/dev/ttyUSB0 #
#----------------------------------------------------------------------------------------------------#
# On Windows #
#----------------------------------------------------------------------------------------------------#
# Open the command prompt and type the following #
# C:\>python RS485_Write.py #
# Enter the COM number when program asks -> com31 #
#====================================================================================================#
import serial # import the module
def banner_top():
print ' +-------------------------------------------+'
print ' | USB2SERIAL RS485 Write in Python 2.7.x |'
print ' | (c) www.xanthium.in |'
print ' +-------------------------------------------+'
def Usage():
print ' | Windows -> COMxx eg COM32 |'
print ' | Linux ->/dev/ttyS* eg /dev/ttyUSB0 |'
print ' +-------------------------------------------+'
def banner_bottom():
print ' +-------------------------------------------+'
print ' | Press Any Key to Exit |'
print ' +-------------------------------------------+'
banner_top() # Display the top banner
Usage()
COM_PortName = raw_input('\n Enter the COM Port Name ->')
#Opening the serial port
COM_Port = serial.Serial(COM_PortName) # open the COM port
print '\n ',COM_PortName,'Opened'
COM_Port.baudrate = 9600 # set Baud rate
COM_Port.bytesize = 8 # Number of data bits = 8
COM_Port.parity = 'N' # No parity
COM_Port.stopbits = 1 # Number of Stop bits = 1
print '\n Baud rate = ',COM_Port.baudrate
print ' Data bits = ',COM_Port.bytesize
print ' Parity = ',COM_Port.parity
print ' Stop bits = ',COM_Port.stopbits
#Controlling DTR and RTS pins to put USB2SERIAL in transmit mode
COM_Port.setDTR(0) #DTR=0,~DTR=1 so DE = 1,Transmit mode enabled
COM_Port.setRTS(0) #RTS=0,~RTS=1 (In FT232 RTS and DTR pins are inverted)
print '\n DTR = 0,~DTR = 1 so DE = 1,Transmit mode enabled'
print ' RTS = 0,~RTS = 1'
#Write character 'A' to serial port
data = bytearray(b'A') # Convert Character to byte array
NoOfBytes = COM_Port.write(data) # Write data to serial port
print '\n ',NoOfBytes,' bytes written'
print '\n A written to',COM_PortName
COM_Port.close() # Close the Serial port
banner_bottom() # Display the bottom banner
dummy = raw_input() # press any key to close
| 65.572727 | 203 | 0.289477 |
509b32fb801516ac48803ee7c6a90164deca2ef0 | 234 | py | Python | setup.py | CousinoMath/mnist-dataset | 4043851e305d27c119b2abaf5896e0ced0968294 | [
"MIT"
] | null | null | null | setup.py | CousinoMath/mnist-dataset | 4043851e305d27c119b2abaf5896e0ced0968294 | [
"MIT"
] | null | null | null | setup.py | CousinoMath/mnist-dataset | 4043851e305d27c119b2abaf5896e0ced0968294 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(name="mnist",
version='0.0',
packages=find_packages(),
install_requires=('docutils', 'numpy', 'theano'),
package_data={
'': ['*.txt', '*.rst', '*.gz']
},
) | 23.4 | 53 | 0.58547 |
fac0d0274bfd8190a666e4fc77dcc8dba697b8a5 | 122 | py | Python | 20210912/docs/source/code/aiohttp/aiohttp_gs_misc.py | fin-py/events | a41ba1ce9e6212b5cc1cb24c5b82a2178ab544ed | [
"MIT"
] | 2 | 2021-09-12T04:22:27.000Z | 2021-11-27T14:21:44.000Z | 20210912/docs/source/code/aiohttp/aiohttp_gs_misc.py | fin-py/events | a41ba1ce9e6212b5cc1cb24c5b82a2178ab544ed | [
"MIT"
] | 13 | 2021-07-18T22:28:20.000Z | 2021-07-30T23:57:30.000Z | 20210912/docs/source/code/aiohttp/aiohttp_gs_misc.py | fin-py/events | a41ba1ce9e6212b5cc1cb24c5b82a2178ab544ed | [
"MIT"
] | 2 | 2021-03-31T06:03:16.000Z | 2021-09-02T13:16:55.000Z | from yarl import URL
url = URL('https://connpass.com/')
print(url / 'explore')
print(url / 'search' % {'q': 'aiohttp'}) | 20.333333 | 40 | 0.622951 |
7360f9cbe4c0fdb2b213d58b0f1e55c620a119c0 | 35 | py | Python | cgn/translator/__init__.py | FabianKP/cgn | 9963e60c4a4bf4f3869e43d1dfbe11da74887ba5 | [
"MIT"
] | 1 | 2022-03-21T00:40:23.000Z | 2022-03-21T00:40:23.000Z | cgn/translator/__init__.py | FabianKP/cgn | 9963e60c4a4bf4f3869e43d1dfbe11da74887ba5 | [
"MIT"
] | null | null | null | cgn/translator/__init__.py | FabianKP/cgn | 9963e60c4a4bf4f3869e43d1dfbe11da74887ba5 | [
"MIT"
] | null | null | null |
from .translator import Translator | 17.5 | 34 | 0.857143 |
5b691128a09dc0f1ce9e87ee50a2d4b532f3224b | 2,403 | py | Python | channel/tests/test_SenderChannel.py | lindhe/datx05-code | 988b53f7466c935728190336286fdf5d30838d76 | [
"MIT"
] | null | null | null | channel/tests/test_SenderChannel.py | lindhe/datx05-code | 988b53f7466c935728190336286fdf5d30838d76 | [
"MIT"
] | null | null | null | channel/tests/test_SenderChannel.py | lindhe/datx05-code | 988b53f7466c935728190336286fdf5d30838d76 | [
"MIT"
] | null | null | null | #!/bin/python3.6
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Robert Gustafsson
# Copyright (c) 2018 Andreas Lindhé
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import struct
import mock
import asyncio
import pytest
from unittest.mock import patch
from ..ppProtocol import PingPongMessage
from ..SenderChannel import SenderChannel
from .unittest_helper import run, mock_async_method
class CallbackObj:
async def callback(self):
return "test"
hw_addr = "00:00:00:00:00:00"
pingpong = SenderChannel(0, hw_addr, 0, CallbackObj(), "127.0.0.1", "5555")
token = (struct.pack("ii17s", 0, 1, hw_addr.encode()), "127.0.0.1")
class TestSenderChannel(unittest.TestCase):
@patch.object(pingpong.udp_sock, 'recvfrom', new=mock_async_method(return_value=token))
def test_receive_no_exception(self):
actual = run(pingpong.receive(token))
self.assertEqual((hw_addr.encode(), 0, 1, b''), actual)
pingpong.udp_sock.sock.close()
@patch.object(pingpong.udp_sock, 'recvfrom', new=mock_async_method(return_value=None))
@patch.object(pingpong.udp_sock, 'sendto', side_effect=Exception('timeout'))
def test_receive_with_exception(self, mock_send):
with self.assertRaises(Exception):
run(pingpong.receive(token))
pingpong.udp_sock.sock.close()
if __name__ == '__main__':
unittest.main()
| 38.758065 | 91 | 0.744486 |
cb605effd7e3b71c767f84da9b39cf3527d825d6 | 2,720 | py | Python | src/stactools/sentinel1_grd/properties.py | stactools-packages/sentinel1-grd | e58c906ce2014215c74f37592ac5ce3f1c5b28f8 | [
"Apache-2.0"
] | null | null | null | src/stactools/sentinel1_grd/properties.py | stactools-packages/sentinel1-grd | e58c906ce2014215c74f37592ac5ce3f1c5b28f8 | [
"Apache-2.0"
] | 1 | 2021-08-31T14:12:31.000Z | 2021-09-06T12:47:14.000Z | src/stactools/sentinel1_grd/properties.py | stactools-packages/sentinel1-grd | e58c906ce2014215c74f37592ac5ce3f1c5b28f8 | [
"Apache-2.0"
] | null | null | null | from stactools.core.io.xml import XmlElement
from pystac.extensions.sar import FrequencyBand, Polarization
from pystac.extensions.sat import OrbitState
def fill_sar_properties(sar_ext, href):
"""Fills the properties for SAR.
Based on the sar Extension.py
Args:
        sar_ext (pystac.extensions.sar.SarExtension): The extension to be populated.
        href (str): The HREF to the scene, this is expected to be an XML file.
    Returns:
        None: the extension is populated in place.
"""
# Read meta file
root = XmlElement.from_file(href)
# Fixed properties
sar_ext.frequency_band = FrequencyBand("C")
sar_ext.center_frequency = 5.405
sar_ext.looks_range = 5
sar_ext.looks_azimuth = 1
sar_ext.pixel_spacing_range = 10
# Read properties
sar_ext.instrument_mode = root.findall(".//s1sarl1:mode")[0].text
sar_ext.polarizations = [
Polarization(x.text)
for x in root.findall(".//s1sarl1:transmitterReceiverPolarisation")
]
sar_ext.product_type = root.findall(".//s1sarl1:productType")[0].text
def fill_sat_properties(sat_ext, href):
    """Fills the properties for the satellite (sat) extension.
    Based on the sat Extension.py
    Args:
        sat_ext (pystac.extensions.sat.SatExtension): The extension to be populated.
        href (str): The HREF to the scene, this is expected to be an XML file.
    Returns:
        None: the extension is populated in place.
"""
# Read meta file
root = XmlElement.from_file(href)
sat_ext.platform_international_designator = root.findall(
".//safe:nssdcIdentifier")[0].text
orbit_state = root.findall(".//s1:pass")[0].text
sat_ext.orbit_state = OrbitState(orbit_state.lower())
sat_ext.absolute_orbit = int(root.findall(".//safe:orbitNumber")[0].text)
sat_ext.relative_orbit = int(
root.findall(".//safe:relativeOrbitNumber")[0].text)
def fill_proj_properties(proj_ext, meta_links, product_meta):
    """Fills the properties for the projection (proj) extension.
    Based on the projection Extension.py
    Args:
        proj_ext (pystac.extensions.projection.ProjectionExtension): The extension to be populated.
        meta_links: Metadata links object exposing the product metadata assets.
        product_meta: Product metadata object providing the geometry and bounding box.
    Returns:
        None: the extension is populated in place.
"""
# Read meta file
links = meta_links.create_product_asset()
root = XmlElement.from_file(links[0][1].href)
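    # The footprint coordinates are geographic lat/lon, hence the fixed EPSG:4326 below.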
proj_ext.epsg = 4326
proj_ext.geometry = product_meta.geometry
proj_ext.bbox = product_meta.bbox
x_size = int(root.findall(".//numberOfSamples")[0].text)
y_size = int(root.findall(".//numberOfLines")[0].text)
proj_ext.shape = [x_size, y_size]
| 30.222222 | 86 | 0.690441 |
64500baba878f966281d8a0d7e103fa17c159bc0 | 3,223 | py | Python | app.py | dmdhrumilmistry/IOT-Cloud-API | ed8514afb0c1640a0f8a307ad53198098223e817 | [
"MIT"
] | null | null | null | app.py | dmdhrumilmistry/IOT-Cloud-API | ed8514afb0c1640a0f8a307ad53198098223e817 | [
"MIT"
] | null | null | null | app.py | dmdhrumilmistry/IOT-Cloud-API | ed8514afb0c1640a0f8a307ad53198098223e817 | [
"MIT"
] | 1 | 2022-03-23T14:35:45.000Z | 2022-03-23T14:35:45.000Z | '''
module: app
description:
-------------------------
API for IOT cloud
-------------------------
Accepts data from the node in form of json data and stores it in local icdb file
Stored Data Format in icdb file:
{
        KEY :
        {
            NODE :
            {
                SENSOR : [(date, time, data)]
}
}
}
'''
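# A node is expected to POST JSON of the form (illustrative values only):
#   {"node": "0", "sensor": "temp", "sen_data": 24.5}
# to the /<AUTH_KEY>/push_data endpoint defined below.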
from flask import Flask, jsonify, make_response, request, render_template
from database import DB
import config
import datetime
import os
from random import random
from time import time
import json
app = Flask(__name__)
app.config['ENV'] = 'development'
db_path = os.path.join(os.getcwd(), 'pushed_data')
db = DB(db_path)
key = "Test_Key"
def __save_pushed_data(data:dict) -> bool:
'''
description:
Saves pushed data from client to database
params:
data (dict) : data in form of dictionary
returns:
bool
'''
status = True
try:
dbdata = db.read_data()
node = data.get("node", "Err")
sensor = data.get("sensor", "Err")
sensor_data = data.get("sen_data", "Err")
if config.AUTH_KEY not in dbdata.keys():
dbdata[config.AUTH_KEY] = dict()
if node not in dbdata[config.AUTH_KEY].keys():
dbdata[config.AUTH_KEY][node] = dict()
if sensor not in dbdata[config.AUTH_KEY][node].keys():
dbdata[config.AUTH_KEY][node][sensor] = list()
time = datetime.datetime.now()
data_tuple = (str(time.strftime("%m %d %Y")), str(time.strftime("%H:%M:%S")), sensor_data)
dbdata[config.AUTH_KEY][node][sensor].append(data_tuple)
db.data = dbdata
db.write_data()
except Exception as e:
status = False
return status
@app.route('/', methods=['POST', 'GET'])
def home():
'''
description:
return Home page html code and status code
params:
None
returns:
Response, int
'''
# response = make_response("<h1>IOT Cloud API</h1>")
response = render_template("index.html")
return response, 200
@app.route(f'/{config.AUTH_KEY}/push_data', methods=['POST'])
def push_data():
'''
description:
handles client pushed json data from the node,
saves in the database, and returns status back
to the user in json format along with status code.
params:
None
returns:
Response, int
'''
if request.method == "POST":
try:
data = request.json
print(data)
return jsonify({"push_status":__save_pushed_data(data)}), 200
except Exception as e:
print(e)
return jsonify({'Error':'Invalid Data'}), 400
return jsonify({'Error':'Invalid Request'}), 400
@app.route('/data', methods=["GET", "POST"])
def get_data():
# data = {
# "temp": db.data["0"]["temp"][-1],
# "humidi"
# }
# db.data["0"]["temp"]
temp = db.data[key]["0"]["temp"][-1][-1]
humid = db.data[key]["0"]["humidity"][-1][-1]
data = [time() * 1000, temp, humid]
response = make_response(json.dumps(data))
response.content_type = 'application/json'
return response
| 24.416667 | 98 | 0.568415 |
c241f394f4b0ad94299737ea42fc20c2bc75e34f | 7,720 | py | Python | polyA/_options.py | TravisWheelerLab/polyA | cbab7f2416066fd24340913fbf5328fb36432131 | [
"BSD-3-Clause"
] | 3 | 2021-01-15T11:39:30.000Z | 2021-01-26T07:28:32.000Z | polyA/_options.py | TravisWheelerLab/polyA | cbab7f2416066fd24340913fbf5328fb36432131 | [
"BSD-3-Clause"
] | 21 | 2020-12-09T23:07:43.000Z | 2021-09-23T03:05:35.000Z | polyA/_options.py | TravisWheelerLab/polyA | cbab7f2416066fd24340913fbf5328fb36432131 | [
"BSD-3-Clause"
] | null | null | null | from argparse import ArgumentParser, Namespace
from typing import List, Optional
from . import __version__
from .constants import (
DEFAULT_CHUNK_SIZE,
DEFAULT_SHARD_GAP,
DEFAULT_TRANS_PENALTY,
)
class Options:
"""
A typed container to hold program options and parameters.
>>> o = Options()
>>> o.log_file_path
''
>>> o = Options(["NONE", "NONE", "--log-file", "foo.txt"])
>>> o.log_file_path
'foo.txt'
"""
alignments_file_path: str
sub_matrices_path: str
# -----------------------
# Algorithm Configuration
# -----------------------
chunk_size: int
trans_penalty: int
confidence: bool
prior_counts_path: str
shard_gap: int
sequence_file_path: str
ultra_data_path: str
complexity_adjustment: bool
# -------------------
# Helper applications
# -------------------
easel_path: str
ultra_path: str
# --------------------
# Output configuration
# --------------------
ids: bool
log_file_path: str
log_level: str # Literal["debug", "verbose", "normal", "quiet"]
matrix_position: bool
output_path: str
output_to_file: bool
sequence_position: bool
soda: bool
# ----------------
# File Conversions
# ----------------
cm_to_stockholm: str
rm_to_stockholm: str
def __init__(self, args: Optional[List[str]] = None) -> None:
parser = ArgumentParser(
description="PolyA sequence adjudication tool",
prog=__package__,
)
parser.add_argument(
"alignments_file_path",
metavar="ALIGNMENTS",
nargs="?",
default="",
help="alignments file in Stockholm format",
)
parser.add_argument(
"sub_matrices_path",
metavar="MATRICES",
nargs="?",
default="",
help="substitution matrices file in PolyA matrix format",
)
parser.add_argument(
"-v",
"--version",
action="version",
version=__version__,
help="show version and exit",
)
parser.add_argument(
"--chunk-size",
type=int,
default=DEFAULT_CHUNK_SIZE,
help="size of the window in base pairs analyzed together",
)
parser.add_argument(
"--trans-penalty",
type=int,
default=DEFAULT_TRANS_PENALTY,
help="penalty for changing annotations",
)
parser.add_argument(
"--confidence",
action="store_true",
default=False,
help="run the confidence calculation and then exit",
)
parser.add_argument(
"--prior-counts",
metavar="FILE",
default="",
help="file containing query genomic counts",
)
parser.add_argument(
"--shard-gap",
type=int,
default=DEFAULT_SHARD_GAP,
help="maximum alignment gap before sharding occurs",
)
parser.add_argument(
"--sequences",
metavar="SEQS",
default="",
help="fasta file for running ULTRA",
)
parser.add_argument(
"--ultra-data",
metavar="FILE",
default="",
help="file of the output from ULTRA",
)
parser.add_argument(
"--easel-path",
metavar="BIN",
default="esl_scorematrix",
help="path to the esl_scorematrix program, if necessary (assumed to be in PATH)",
)
parser.add_argument(
"--ultra-path",
metavar="BIN",
default="ultra",
help="path to the ULTRA binary to use, if necessary (assumed to be in PATH)",
)
parser.add_argument(
"--ids",
action="store_true",
default=False,
help="include internal (random) annotation IDs in output",
)
parser.add_argument(
"--log-file",
metavar="LOG",
default="",
help="file to store log output in, defaults to stderr",
)
parser.add_argument(
"--log-level",
metavar="LEVEL",
choices=["debug", "verbose", "normal", "quiet"],
help="logging level to use, 'debug' is the most noisy",
)
parser.add_argument(
"--matrix-position",
action="store_true",
default=False,
help="produce output in terms of the matrix position",
)
parser.add_argument(
"--output-path",
metavar="PATH",
default=".",
help="directory to write output files to, defaults to working directory",
)
parser.add_argument(
"--output-to-file",
action="store_true",
default=False,
help="write output to a file in the output path",
)
parser.add_argument(
"--sequence-position",
action="store_true",
default=False,
help="produce output in terms of the target sequence position",
)
parser.add_argument(
"--soda",
action="store_true",
default=False,
help="write a SODA visualization file to the output directory",
)
parser.add_argument(
"--complexity-adjustment",
action="store_true",
default=False,
help="use complexity adjusted scoring",
)
parser.add_argument(
"--cm-to-stockholm",
metavar="FILE",
default="",
help="convert a file in CrossMatch format to PolyA's Stockholm format",
)
parser.add_argument(
"--rm-to-stockholm",
metavar="FILE",
default="",
help="convert a file in CrossMatch format to PolyA's Stockholm format",
)
namespace: Namespace
if args is None:
namespace = parser.parse_args(args=["NONE", "NONE"])
else:
namespace = parser.parse_args(args=args)
self.alignments_file_path = namespace.alignments_file_path
self.sub_matrices_path = namespace.sub_matrices_path
self.chunk_size = namespace.chunk_size
self.trans_penalty = namespace.trans_penalty
self.confidence = namespace.confidence
self.prior_counts_path = namespace.prior_counts
self.shard_gap = namespace.shard_gap
self.sequence_file_path = namespace.sequences
self.ultra_data_path = namespace.ultra_data
self.complexity_adjustment = namespace.complexity_adjustment
self.easel_path = namespace.easel_path
self.ultra_path = namespace.ultra_path
self.ids = namespace.ids
self.log_file_path = namespace.log_file
self.log_level = namespace.log_level
self.matrix_position = namespace.matrix_position
self.output_path = namespace.output_path
self.output_to_file = namespace.output_to_file
self.sequence_position = namespace.sequence_position
self.soda = namespace.soda
self.cm_to_stockholm = namespace.cm_to_stockholm
self.rm_to_stockholm = namespace.rm_to_stockholm
if not (self.cm_to_stockholm or self.rm_to_stockholm):
if not (self.alignments_file_path and self.alignments_file_path):
parser.error(
"ALIGNMENTS and MATRICES and required unless using a converter"
)
| 29.922481 | 93 | 0.549352 |
571436ce5f119b8ce88f6292acfaef7b33fb0092 | 549 | py | Python | angr/exploration_techniques/lengthlimiter.py | delia0204/angr | 0fd71a73d36b8a6e441634d21bad947c7e5a7def | [
"BSD-2-Clause"
] | null | null | null | angr/exploration_techniques/lengthlimiter.py | delia0204/angr | 0fd71a73d36b8a6e441634d21bad947c7e5a7def | [
"BSD-2-Clause"
] | null | null | null | angr/exploration_techniques/lengthlimiter.py | delia0204/angr | 0fd71a73d36b8a6e441634d21bad947c7e5a7def | [
"BSD-2-Clause"
] | 1 | 2019-08-07T01:42:01.000Z | 2019-08-07T01:42:01.000Z | from . import ExplorationTechnique
class LengthLimiter(ExplorationTechnique):
"""
Length limiter on paths.
"""
def __init__(self, max_length, drop=False):
super(LengthLimiter, self).__init__()
self._max_length = max_length
self._drop = drop
def _filter(self, s):
return s.history.block_count > self._max_length
def step(self, pg, stash, **kwargs):
pg = pg._one_step(stash=stash, **kwargs)
pg.move('active', '_DROP' if self._drop else 'cut', self._filter)
return pg
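# Usage sketch (assumes an angr simulation manager `simgr` already exists):
#   simgr.use_technique(LengthLimiter(max_length=100, drop=True))
#   simgr.run()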
| 27.45 | 73 | 0.641166 |
d31dc92175cc9d4141735541b2c3a4569c6303ad | 121 | py | Python | computer_science/big_o/example7.py | LeandroTk/Algorithms | 569ed68eba3eeff902f8078992099c28ce4d7cd6 | [
"MIT"
] | 205 | 2018-12-01T17:49:49.000Z | 2021-12-22T07:02:27.000Z | computer_science/big_o/example7.py | LeandroTk/Algorithms | 569ed68eba3eeff902f8078992099c28ce4d7cd6 | [
"MIT"
] | 2 | 2020-01-01T16:34:29.000Z | 2020-04-26T19:11:13.000Z | computer_science/big_o/example7.py | LeandroTk/Algorithms | 569ed68eba3eeff902f8078992099c28ce4d7cd6 | [
"MIT"
] | 50 | 2018-11-28T20:51:36.000Z | 2021-11-29T04:08:25.000Z | # O(N + P), if P < N / 2 --> O(N)
# O(2N) --> O(N)
# O(N + logN) --> O(N)
# O(N + M), if N > M then O(N), otherwise O(M)
| 24.2 | 46 | 0.38843 |
baba6f0fd5675ebdfcf541e9f71eaba73584f8d7 | 78 | py | Python | PYSTUDY/jsonlib.py | shi-cong/review | c8da7128ea18ecaa5849f2066d321e70d6f97f70 | [
"Apache-2.0"
] | 8 | 2017-10-22T00:24:42.000Z | 2017-11-24T01:23:52.000Z | PYSTUDY/jsonlib.py | shi-cong/review | c8da7128ea18ecaa5849f2066d321e70d6f97f70 | [
"Apache-2.0"
] | 2 | 2017-10-12T22:04:25.000Z | 2017-10-12T23:43:48.000Z | PYSTUDY/jsonlib.py | shi-cong/review | c8da7128ea18ecaa5849f2066d321e70d6f97f70 | [
"Apache-2.0"
] | null | null | null | """
json module
"""
import json
loads = json.loads # from a JSON string
dumps = json.dumps # from a dict
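# Example:
#   dumps({'a': 1})   -> '{"a": 1}'
#   loads('{"a": 1}') -> {'a': 1}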
| 9.75 | 24 | 0.641026 |
af279bd6db21259a8d36e9fa01c85c6618d88bc7 | 383 | py | Python | pynasl/exceptions.py | kafti/pynasl | e0e9a7834a03139b39ee10e33b9316cc22844efb | [
"MIT"
] | 6 | 2015-05-06T14:28:46.000Z | 2022-01-21T14:37:47.000Z | pynasl/exceptions.py | kafti/pynasl | e0e9a7834a03139b39ee10e33b9316cc22844efb | [
"MIT"
] | null | null | null | pynasl/exceptions.py | kafti/pynasl | e0e9a7834a03139b39ee10e33b9316cc22844efb | [
"MIT"
] | 4 | 2015-06-18T07:32:18.000Z | 2019-09-30T11:58:04.000Z | #-------------------------------------------------------------------------------
# Copyright (c) 2011, Kafti team
#
# Released under the MIT license. See the LICENSE file for details.
#-------------------------------------------------------------------------------
class LexicalError(Exception):
"""
An Exception indicating a lexical error in script.
"""
pass
| 29.461538 | 80 | 0.386423 |
76dc75cbb8164176479edd1f4ce37ff50941aba0 | 4,959 | py | Python | shaman_project/bbo/heuristics/genetic_algorithm/genetic_algorithm.py | ValentinGaut/shaman | 754e9eef3c097f3e58b0f06e7c08716bc1b11edd | [
"Apache-2.0"
] | null | null | null | shaman_project/bbo/heuristics/genetic_algorithm/genetic_algorithm.py | ValentinGaut/shaman | 754e9eef3c097f3e58b0f06e7c08716bc1b11edd | [
"Apache-2.0"
] | null | null | null | shaman_project/bbo/heuristics/genetic_algorithm/genetic_algorithm.py | ValentinGaut/shaman | 754e9eef3c097f3e58b0f06e7c08716bc1b11edd | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 BULL SAS All rights reserved
"""Implementation of the genetic algorithm as an heuristic for black-box
optimization."""
# Ignore unused argument kwargs
# pylint: disable=unused-argument
import numpy as np
from bbo.heuristics.heuristics import Heuristic
class GeneticAlgorithm(Heuristic):
"""Object that will perform the genetic algorithm.
Inherits from the mother class Heuristic.
"""
def __init__(
self,
selection_method,
crossover_method,
mutation_method,
mutation_rate,
*args,
max_repeat=5,
**kwargs,
):
"""Initializes a GeneticAlgorithm object.
Args:
selection_method (Python function): The method to use in order
to select the two parents chromosomes.
crossover_method (Python function): The method to use to mate
the parents and cross their alleles.
mutation_method (Python function): The method to use to perform
a mutation on a given chromosome.
mutation_rate (float): A float between 0 and 1 to determine the
probability of mutation at each round.
            max_repeat (int): The maximum number of repetitions allowed when
                looking for a new child.
            *args, **kwargs: The arguments for the selection of the fittest
                parent.
"""
# Initialization of the mother class
super(GeneticAlgorithm, self).__init__(
selection_method, crossover_method, mutation_method
)
# save selection method
self.selection_method = selection_method
# save crossover method
self.crossover_method = crossover_method
# save mutation method
self.mutation_method = mutation_method
# save mutation rate
self.mutation_rate = mutation_rate
# set number of mutation to 0
self.nbr_mutation = 0
# save maximum repetition to find new offspring
self.max_repeat = max_repeat
# save as a list of tuples the parents and their offspring,
# using the (parent_1, parent_2, offspring) notation
self.family_line = list()
# save args and kwargs
self.args = args
self.kwargs = kwargs
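        # Construction sketch (the callables named here are hypothetical -- any
        # selection/crossover/mutation functions with the expected signatures work):
        #   ga = GeneticAlgorithm(tournament_pick, single_point_crossover,
        #                         random_flip_mutation, mutation_rate=0.1)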
def choose_next_parameter(self, history, ranges, *args, **kwargs):
"""Select the next parameters for the optimization process, in this
case the children of the two parents selected as the fittest.
A genetic algorithm has the following rule for choosing the
next parameter:
1) Use a selection method to pick two parents fit for mating
2) Use a crossover method to mate those two parents
3) Probabilistically determine whether or not the mutation method
should be applied.
Args:
history (dict): the history of the optimization, i.e. the tested
parameters and the associated value.
ranges (numpy array of numpy arrays): the possible values of each
parameter dimension.
Returns:
numpy array: The next parameter, i.e. the child born from the
reproduction of the two parents.
"""
idx = 0
# loop until the child is different from its two parents
        while idx < self.max_repeat:
# Select two parents using the selection method
parent_1, parent_2 = self.selection_method(
history=history, *self.args, **self.kwargs
)
# Mate those two parents to compute a new child
child = self.crossover_method(parent_1, parent_2)
            # Is there a mutation at this round? Compute the probability
            # using a Bernoulli random variable
mutation = np.random.binomial(1, self.mutation_rate)
# If so, perform mutation on the child and return the mutant form
if mutation:
child = self.mutation_method(child, ranges)
self.nbr_mutation += 1
if not np.array_equal(child, parent_1) and not np.array_equal(
child, parent_2
):
break
idx += 1
self.family_line.append((parent_1, parent_2, child))
return child
def summary(self, *args, **kwargs):
"""Returns a summary of the optimization process of the genetic
algorithm:
- A description of the 'family line', using the format:
(parent_1, parent_2, child)
- The number of mutations
"""
print(f"Number of mutations: {self.nbr_mutation}")
# graphical representation of the family tree
print("Family tree:")
for family in self.family_line:
print(f"{family[0]} + {family[1]}")
print(f"|_> {family[2]}")
def reset(self):
"""Resets the algorithm."""
| 38.146154 | 77 | 0.616052 |
8e8e53521f3157287dff652c6fd9d2c0a5f3e425 | 21,268 | py | Python | returns/context/requires_context_result.py | nurumaik/returns | 7e2058162192b532cdf0243a3463bdd508077bde | [
"BSD-2-Clause"
] | null | null | null | returns/context/requires_context_result.py | nurumaik/returns | 7e2058162192b532cdf0243a3463bdd508077bde | [
"BSD-2-Clause"
] | null | null | null | returns/context/requires_context_result.py | nurumaik/returns | 7e2058162192b532cdf0243a3463bdd508077bde | [
"BSD-2-Clause"
] | null | null | null | from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Iterable,
Sequence,
TypeVar,
)
from typing_extensions import final
from returns._generated.iterable import iterable_kind
from returns.context import NoDeps
from returns.interfaces import iterable
from returns.interfaces.specific import reader, result
from returns.primitives.container import BaseContainer
from returns.primitives.hkt import Kind3, SupportsKind3, dekind
from returns.result import Failure, Result, Success
if TYPE_CHECKING:
from returns.context.requires_context import RequiresContext
# Context:
_EnvType = TypeVar('_EnvType', contravariant=True)
_NewEnvType = TypeVar('_NewEnvType')
# Result:
_ValueType = TypeVar('_ValueType', covariant=True)
_NewValueType = TypeVar('_NewValueType')
_ErrorType = TypeVar('_ErrorType', covariant=True)
_NewErrorType = TypeVar('_NewErrorType')
# Helpers:
_FirstType = TypeVar('_FirstType')
@final
class RequiresContextResult(
BaseContainer,
SupportsKind3['RequiresContextResult', _ValueType, _ErrorType, _EnvType],
reader.ReaderBased3[_ValueType, _ErrorType, _EnvType],
result.ResultBased3[_ValueType, _ErrorType, _EnvType],
iterable.Iterable3[_ValueType, _ErrorType, _EnvType],
):
"""
The ``RequiresContextResult`` combinator.
See :class:`returns.context.requires_context.RequiresContext` for more docs.
This is just a handy wrapper around ``RequiresContext[env, Result[a, b]]``
which represents a context-dependent pure operation
that might fail and return :class:`returns.result.Result`.
It has several important differences from the regular ``Result`` classes.
It does not have ``Success`` and ``Failure`` subclasses.
Because, the computation is not yet performed.
And we cannot know the type in advance.
So, this is a thin wrapper, without any changes in logic.
Why do we need this wrapper? That's just for better usability!
.. code:: python
>>> from returns.context import RequiresContext
>>> from returns.result import Success, Result
>>> def function(arg: int) -> Result[int, str]:
... return Success(arg + 1)
>>> # Without wrapper:
>>> assert RequiresContext.from_value(Success(1)).map(
... lambda result: result.bind(function),
... )(...) == Success(2)
>>> # With wrapper:
>>> assert RequiresContextResult.from_value(1).bind_result(
... function,
... )(...) == Success(2)
This way ``RequiresContextResult`` allows to simply work with:
- raw values and pure functions
- ``RequiresContext`` values and pure functions returning it
- ``Result`` and functions returning it
Important implementation detail:
due it is meaning, ``RequiresContextResult``
cannot have ``Success`` and ``Failure`` subclasses.
We only have just one type. That's by design.
Different converters are also not supported for this type.
Use converters inside the ``RequiresContext`` context, not outside.
See also:
https://dev.to/gcanti/getting-started-with-fp-ts-reader-1ie5
https://en.wikipedia.org/wiki/Lazy_evaluation
https://bit.ly/2R8l4WK
https://bit.ly/2RwP4fp
"""
#: This field has an extra 'RequiresContext' just because `mypy` needs it.
_inner_value: Callable[
['RequiresContextResult', _EnvType],
Result[_ValueType, _ErrorType],
]
    #: A convenient placeholder to call methods created by `.from_value()`.
empty: ClassVar[NoDeps] = object()
def __init__(
self,
inner_value: Callable[[_EnvType], Result[_ValueType, _ErrorType]],
) -> None:
"""
Public constructor for this type. Also required for typing.
Only allows functions of kind ``* -> *``
and returning :class:`returns.result.Result` instances.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success
>>> str(RequiresContextResult(lambda deps: Success(deps + 1)))
'<RequiresContextResult: <function <lambda> at ...>>'
"""
super().__init__(inner_value)
def __call__(self, deps: _EnvType) -> Result[_ValueType, _ErrorType]:
"""
Evaluates the wrapped function.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success
>>> def first(lg: bool) -> RequiresContextResult[int, str, float]:
... # `deps` has `float` type here:
... return RequiresContextResult(
... lambda deps: Success(deps if lg else -deps),
... )
>>> instance = first(False)
>>> assert instance(3.5) == Success(-3.5)
        In other words, it is a regular Python magic method.
"""
return self._inner_value(deps)
def swap(self) -> 'RequiresContextResult[_ErrorType, _ValueType, _EnvType]':
"""
Swaps value and error types.
So, values become errors and errors become values.
It is useful when you have to work with errors a lot.
And since we have a lot of ``.bind_`` related methods
and only a single ``.rescue`` - it is easier to work with values.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Failure, Success
>>> success = RequiresContextResult.from_value(1)
>>> failure = RequiresContextResult.from_failure(1)
>>> assert success.swap()(...) == Failure(1)
>>> assert failure.swap()(...) == Success(1)
"""
return RequiresContextResult(lambda deps: self(deps).swap())
def map( # noqa: WPS125
self, function: Callable[[_ValueType], _NewValueType],
) -> 'RequiresContextResult[_NewValueType, _ErrorType, _EnvType]':
"""
Composes successful container with a pure function.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure
>>> assert RequiresContextResult.from_value(1).map(
... lambda x: x + 1,
... )(...) == Success(2)
>>> assert RequiresContextResult.from_failure(1).map(
... lambda x: x + 1,
... )(...) == Failure(1)
"""
return RequiresContextResult(lambda deps: self(deps).map(function))
def apply(
self,
container: Kind3[
'RequiresContextResult',
Callable[[_ValueType], _NewValueType],
_ErrorType,
_EnvType,
],
) -> 'RequiresContextResult[_NewValueType, _ErrorType, _EnvType]':
"""
Calls a wrapped function in a container on this container.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure, Result
>>> def transform(arg: str) -> str:
... return arg + 'b'
>>> assert RequiresContextResult.from_value('a').apply(
... RequiresContextResult.from_value(transform),
... )(...) == Success('ab')
>>> assert RequiresContextResult.from_failure('a').apply(
... RequiresContextResult.from_value(transform),
... )(...) == Failure('a')
>>> assert isinstance(RequiresContextResult.from_value('a').apply(
... RequiresContextResult.from_failure(transform),
... )(...), Result.failure_type) is True
"""
return RequiresContextResult(
lambda deps: self(deps).apply(dekind(container)(deps)),
)
def bind(
self,
function: Callable[
[_ValueType],
Kind3[
'RequiresContextResult',
_NewValueType,
_ErrorType,
_EnvType,
],
],
) -> 'RequiresContextResult[_NewValueType, _ErrorType, _EnvType]':
"""
Composes this container with a function returning the same type.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure
>>> def first(lg: bool) -> RequiresContextResult[int, int, float]:
... # `deps` has `float` type here:
... return RequiresContextResult(
... lambda deps: Success(deps) if lg else Failure(-deps),
... )
>>> def second(
... number: int,
... ) -> RequiresContextResult[str, int, float]:
... # `deps` has `float` type here:
... return RequiresContextResult(
... lambda deps: Success('>=' if number >= deps else '<'),
... )
>>> assert first(True).bind(second)(1) == Success('>=')
>>> assert first(False).bind(second)(2) == Failure(-2)
"""
return RequiresContextResult(
lambda deps: self(deps).bind(
lambda inner: function(inner)(deps), # type: ignore
),
)
def bind_result(
self,
function: Callable[[_ValueType], Result[_NewValueType, _ErrorType]],
) -> 'RequiresContextResult[_NewValueType, _ErrorType, _EnvType]':
"""
Binds ``Result`` returning function to current container.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure, Result
>>> def function(num: int) -> Result[str, int]:
... return Success(num + 1) if num > 0 else Failure('<0')
>>> assert RequiresContextResult.from_value(1).bind_result(
... function,
... )(RequiresContextResult.empty) == Success(2)
>>> assert RequiresContextResult.from_value(0).bind_result(
... function,
... )(RequiresContextResult.empty) == Failure('<0')
>>> assert RequiresContextResult.from_failure(':(').bind_result(
... function,
... )(RequiresContextResult.empty) == Failure(':(')
"""
return RequiresContextResult(lambda deps: self(deps).bind(function))
def bind_context(
self,
function: Callable[
[_ValueType],
'RequiresContext[_NewValueType, _EnvType]',
],
) -> 'RequiresContextResult[_NewValueType, _ErrorType, _EnvType]':
"""
Binds ``RequiresContext`` returning function to current container.
.. code:: python
>>> from returns.context import RequiresContext
>>> from returns.result import Success, Failure
>>> def function(arg: int) -> RequiresContext[int, str]:
... return RequiresContext(lambda deps: len(deps) + arg)
>>> assert function(2)('abc') == 5
>>> assert RequiresContextResult.from_value(2).bind_context(
... function,
... )('abc') == Success(5)
>>> assert RequiresContextResult.from_failure(2).bind_context(
... function,
... )('abc') == Failure(2)
"""
return RequiresContextResult(
lambda deps: self(deps).map(
lambda inner: function(inner)(deps), # type: ignore
),
)
def alt(
self, function: Callable[[_ErrorType], _NewErrorType],
) -> 'RequiresContextResult[_ValueType, _NewErrorType, _EnvType]':
"""
Composes failed container with a pure function.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure
>>> assert RequiresContextResult.from_value(1).alt(
... lambda x: x + 1,
... )(...) == Success(1)
>>> assert RequiresContextResult.from_failure(1).alt(
... lambda x: x + 1,
... )(...) == Failure(2)
"""
return RequiresContextResult(lambda deps: self(deps).alt(function))
def rescue(
self,
function: Callable[
[_ErrorType],
Kind3[
'RequiresContextResult',
_ValueType,
_NewErrorType,
_EnvType,
],
],
) -> 'RequiresContextResult[_ValueType, _NewErrorType, _EnvType]':
"""
Composes this container with a function returning the same type.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure
>>> def rescuable(arg: str) -> RequiresContextResult[str, str, str]:
... if len(arg) > 1:
... return RequiresContextResult(
... lambda deps: Success(deps + arg),
... )
... return RequiresContextResult(
... lambda deps: Failure(arg + deps),
... )
>>> assert RequiresContextResult.from_value('a').rescue(
... rescuable,
... )('c') == Success('a')
>>> assert RequiresContextResult.from_failure('a').rescue(
... rescuable,
... )('c') == Failure('ac')
>>> assert RequiresContextResult.from_failure('aa').rescue(
... rescuable,
... )('b') == Success('baa')
"""
return RequiresContextResult(
lambda deps: self(deps).rescue(
lambda inner: function(inner)(deps), # type: ignore
),
)
def modify_env(
self,
function: Callable[[_NewEnvType], _EnvType],
) -> 'RequiresContextResult[_ValueType, _ErrorType, _NewEnvType]':
"""
        Allows modifying the environment type.
.. code:: python
>>> from returns.context import RequiresContextResultE
>>> from returns.result import Success, safe
>>> def div(arg: int) -> RequiresContextResultE[float, int]:
... return RequiresContextResultE(
... safe(lambda deps: arg / deps),
... )
>>> assert div(3).modify_env(int)('2') == Success(1.5)
>>> assert div(3).modify_env(int)('0').failure()
"""
return RequiresContextResult(lambda deps: self(function(deps)))
@classmethod
def ask(cls) -> 'RequiresContextResult[_EnvType, _ErrorType, _EnvType]':
"""
        Used to get the current dependencies inside the call stack.
Similar to :meth:`returns.context.requires_context.RequiresContext.ask`,
but returns ``Result`` instead of a regular value.
Please, refer to the docs there to learn how to use it.
One important note that is worth duplicating here:
you might need to provide ``_EnvType`` explicitly,
so ``mypy`` will know about it statically.
.. code:: python
>>> from returns.context import RequiresContextResultE
>>> from returns.result import Success
>>> assert RequiresContextResultE[int, int].ask().map(
... str,
... )(1) == Success('1')
"""
return RequiresContextResult(Success)
@classmethod
def from_result(
cls, inner_value: Result[_ValueType, _ErrorType],
) -> 'RequiresContextResult[_ValueType, _ErrorType, NoDeps]':
"""
        Creates a new container with ``Result`` as a unit value.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure
>>> deps = RequiresContextResult.empty
>>> assert RequiresContextResult.from_result(
... Success(1),
... )(deps) == Success(1)
>>> assert RequiresContextResult.from_result(
... Failure(1),
... )(deps) == Failure(1)
"""
return RequiresContextResult(lambda _: inner_value)
@classmethod
def from_typecast(
cls,
inner_value:
'RequiresContext[Result[_NewValueType, _NewErrorType], _EnvType]',
) -> 'RequiresContextResult[_NewValueType, _NewErrorType, _EnvType]':
"""
You might end up with ``RequiresContext[Result[...]]`` as a value.
This method is designed to turn it into ``RequiresContextResult``.
        It preserves all the typing information.
It is just more useful!
.. code:: python
>>> from returns.context import RequiresContext
>>> from returns.result import Success, Failure
>>> assert RequiresContextResult.from_typecast(
... RequiresContext.from_value(Success(1)),
... )(RequiresContextResult.empty) == Success(1)
>>> assert RequiresContextResult.from_typecast(
... RequiresContext.from_value(Failure(1)),
... )(RequiresContextResult.empty) == Failure(1)
"""
return RequiresContextResult(inner_value)
@classmethod
def from_context(
cls, inner_value: 'RequiresContext[_FirstType, _EnvType]',
) -> 'RequiresContextResult[_FirstType, Any, _EnvType]':
"""
        Creates a new container from ``RequiresContext`` as a success unit.
.. code:: python
>>> from returns.context import RequiresContext
>>> from returns.result import Success
>>> assert RequiresContextResult.from_context(
... RequiresContext.from_value(1),
... )(...) == Success(1)
"""
return RequiresContextResult(lambda deps: Success(inner_value(deps)))
@classmethod
def from_failed_context(
cls, inner_value: 'RequiresContext[_FirstType, _EnvType]',
) -> 'RequiresContextResult[Any, _FirstType, _EnvType]':
"""
        Creates a new container from ``RequiresContext`` as a failure unit.
.. code:: python
>>> from returns.context import RequiresContext
>>> from returns.result import Failure
>>> assert RequiresContextResult.from_failed_context(
... RequiresContext.from_value(1),
... )(...) == Failure(1)
"""
return RequiresContextResult(lambda deps: Failure(inner_value(deps)))
@classmethod
def from_value(
cls, inner_value: _FirstType,
) -> 'RequiresContextResult[_FirstType, Any, NoDeps]':
"""
        Creates a new container with ``Success(inner_value)`` as a unit value.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success
>>> assert RequiresContextResult.from_value(1)(...) == Success(1)
"""
return RequiresContextResult(lambda _: Success(inner_value))
@classmethod
def from_failure(
cls, inner_value: _FirstType,
) -> 'RequiresContextResult[Any, _FirstType, NoDeps]':
"""
        Creates a new container with ``Failure(inner_value)`` as a unit value.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Failure
>>> assert RequiresContextResult.from_failure(1)(...) == Failure(1)
"""
return RequiresContextResult(lambda _: Failure(inner_value))
@classmethod
def from_iterable(
cls,
inner_value:
Iterable[
Kind3[
'RequiresContextResult',
_ValueType,
_ErrorType,
_EnvType,
],
],
) -> 'RequiresContextResult[Sequence[_ValueType], _ErrorType, _EnvType]':
"""
Transforms an iterable of ``RequiresContextResult`` containers.
Returns a single container with multiple elements inside.
.. code:: python
>>> from returns.context import RequiresContextResult
>>> from returns.result import Success, Failure
>>> assert RequiresContextResult.from_iterable([
... RequiresContextResult.from_value(1),
... RequiresContextResult.from_value(2),
... ])(...) == Success((1, 2))
>>> assert RequiresContextResult.from_iterable([
... RequiresContextResult.from_value(1),
... RequiresContextResult.from_failure('a'),
... ])(...) == Failure('a')
>>> assert RequiresContextResult.from_iterable([
... RequiresContextResult.from_failure('a'),
... RequiresContextResult.from_value(1),
... ])(...) == Failure('a')
"""
return dekind(iterable_kind(cls, inner_value))
# Aliases:
#: Alias for a popular case when ``Result`` has ``Exception`` as error type.
RequiresContextResultE = RequiresContextResult[
_ValueType, Exception, _EnvType,
]
#: Alias to save you some typing. Uses original name from Haskell.
ReaderResult = RequiresContextResult
#: Alias to save you some typing. Has ``Exception`` as error type.
ReaderResultE = RequiresContextResult[_ValueType, Exception, _EnvType]
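# For instance (illustrative annotation only; ``DBConnection`` is a placeholder type),
# ``ReaderResultE[int, DBConnection]`` names the same type as
# ``RequiresContextResult[int, Exception, DBConnection]``.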
| 33.23125 | 80 | 0.591781 |
ba26069bf439820986a841df4e9fba10364c2283 | 583 | py | Python | python/API_test/test.py | GG-yuki/bugs | aabd576e9e57012a3390007af890b7c6ab6cdda8 | [
"MIT"
] | null | null | null | python/API_test/test.py | GG-yuki/bugs | aabd576e9e57012a3390007af890b7c6ab6cdda8 | [
"MIT"
] | null | null | null | python/API_test/test.py | GG-yuki/bugs | aabd576e9e57012a3390007af890b7c6ab6cdda8 | [
"MIT"
] | null | null | null | # import numpy as np
# import matplotlib.pyplot as plt
#
# def f(t):
# return np.exp(-t) * np.cos(2*np.pi*t)
#
# t1 = np.arange(0.0, 5.0, 0.1)
# t2 = np.arange(0.0, 5.0, 0.02)
#
# plt.figure("2suplot")
# plt.subplot(211)
# plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
#
# plt.subplot(212)
# plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
# plt.show()
# plt.figure("2suplot222")
# plt.subplot(211)
# plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
#
# plt.subplot(212)
# plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
# plt.show()
if __name__ == '__main__':
    print('The program is running on its own')
else:
    print('I was imported from another module')
0f39f7c1fb1495d797daa94ba4ab0c135d5992ab | 15,568 | py | Python | cbuild_config.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 189 | 2015-01-07T18:34:31.000Z | 2022-03-21T17:41:56.000Z | cbuild_config.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 24 | 2015-05-19T14:00:16.000Z | 2022-03-16T22:01:30.000Z | cbuild_config.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 60 | 2015-03-10T22:12:50.000Z | 2022-03-07T21:57:40.000Z | #
# Copyright 2019, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import json
import os
import os.path
import platform
import re
import sys
import warnings
from distutils.command.install_headers import install_headers as install_headers_orig
from shutil import copyfile, copymode
from setuptools.command.build_ext import build_ext
import pathlib
import gen_config
curdir = pathlib.Path(__file__).parent
def get_json_build_cfg():
with open(str(curdir.joinpath("cbuild_cfg.json"))) as JSONFILE:
return json.load(JSONFILE)
BUILD_CFG = get_json_build_cfg()
PYCBC_LCB_API = os.getenv("PYCBC_LCB_API", BUILD_CFG.get(
'comp_options', {}).get('PYCBC_LCB_API'))
def get_all_sources():
return BUILD_CFG.get('source', []) + BUILD_CFG.get('apis',
{}).get(PYCBC_LCB_API, {}).get('sources', [])
def get_sources():
sources_ext = {}
all_sources = get_all_sources()
SOURCEMODS = list(filter(re.compile(r'^.*\.c$').match, all_sources))
SOURCEMODS_CPP = list(
filter(re.compile(r'^.*\.(cpp|cxx|cc)$').match, all_sources))
sources_ext['sources'] = list(map(str, SOURCEMODS + SOURCEMODS_CPP))
return sources_ext
couchbase_core = BUILD_CFG.get(
"comp_options", {}).get("PYCBC_CORE", "couchbase")
def get_cbuild_options():
extoptions = {}
extoptions['extra_compile_args'] = []
extoptions['extra_link_args'] = []
def boolean_option(flag):
return ["-D{}={}".format(flag, os.environ.get(flag))]
def string_option(flag):
return ["-D{}={}".format(flag, os.environ.get(flag))]
COMP_OPTION_PREFIX = "PYCBC_COMP_OPT_"
def comp_option(flag):
return [
"-{}={}".format(flag.replace(COMP_OPTION_PREFIX, ""), os.environ.get(flag))]
COMP_OPTION_BOOL_PREFIX = "PYCBC_COMP_OPT_BOOL_"
def comp_option_bool(flag):
return ["-{}".format(flag.replace(COMP_OPTION_BOOL_PREFIX, ""))]
CLANG_SAN_OPTIONS = {"address": "lsan", "undefined": "ubsan"}
CLANG_SAN_PREFIX = "PYCBC_SAN_OPT_"
def comp_clang_san_option(flag):
san_option = flag.replace(CLANG_SAN_PREFIX, "")
fsanitize_statements = [
"-fsanitize={}".format(san_option), "-fno-omit-frame-pointer"]
extoptions['extra_link_args'] += fsanitize_statements + \
['-Llibclang_rt.asan_osx_dynamic']
return fsanitize_statements
def comp_option_pattern(prefix):
return re.escape(prefix) + ".*"
comp_flags = {"PYCBC_STRICT": boolean_option,
"PYCBC_TABBED_CONTEXTS_ENABLE": boolean_option,
"PYCBC_LCB_API": string_option,
"PYCBC_REF_ACCOUNTING": boolean_option,
"PYCBC_TRACING_DISABLE": boolean_option, "PYCBC_DEBUG": boolean_option,
"PYCBC_GEN_PYTHON": boolean_option,
"PYCBC_CRYPTO_VERSION": boolean_option, comp_option_pattern(COMP_OPTION_PREFIX): comp_option,
comp_option_pattern(COMP_OPTION_BOOL_PREFIX): comp_option_bool,
comp_option_pattern(CLANG_SAN_PREFIX): comp_clang_san_option}
debug_symbols = len(set(os.environ.keys()) & {
"PYCBC_DEBUG", "PYCBC_DEBUG_SYMBOLS"}) > 0
comp_arg_additions = list(itertools.chain.from_iterable(
action(actual_flag) for flag, action in comp_flags.items() for actual_flag in os.environ.keys() if
re.match(flag, actual_flag)))
print(comp_arg_additions)
extoptions['include_dirs'] = []
extoptions['extra_compile_args'] += list(comp_arg_additions)
return extoptions, debug_symbols
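# For example (hypothetical environment): exporting PYCBC_DEBUG=1 and PYCBC_SAN_OPT_address=1
# makes get_cbuild_options() add "-DPYCBC_DEBUG=1" and
# "-fsanitize=address -fno-omit-frame-pointer" to extra_compile_args and return
# debug_symbols=True.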
def get_ext_options():
extoptions, debug_symbols = get_cbuild_options()
pkgdata = {}
if sys.platform != 'win32':
extoptions['extra_compile_args'] += ['-Wno-strict-prototypes',
'-fPIC', '-std=c11']
extoptions['libraries'] = ['couchbase']
if debug_symbols:
extoptions['extra_compile_args'] += ['-O0', '-g3']
extoptions['extra_link_args'] += ['-O0', '-g3']
if sys.platform == 'darwin':
extoptions['extra_compile_args'] += ['-Wsometimes-uninitialized',
'-Wconditional-uninitialized',
'-Wno-nullability-completeness',
'-Wno-expansion-to-defined']
extoptions['extra_compile_args'] += ['-Wuninitialized',
'-Wswitch', '-Werror', '-Wno-missing-braces']
print(pkgdata)
else:
if sys.version_info < (3, 0, 0):
raise RuntimeError(
"Windows on Python earlier than v3 unsupported.")
warnings.warn("I'm detecting you're running windows."
"You might want to modify "
"the 'setup.py' script to use appropriate paths")
        # The layout I have here is an ..\lcb-winbuild directory, in which there are subdirs
# called 'x86' and 'x64', for x86 and x64 architectures. The default
# 'nmake install' on libcouchbase will install them to 'deps'
bit_type = platform.architecture()[0]
lcb_root = os.path.join(os.path.pardir, 'lcb-winbuild')
if bit_type.startswith('32'):
lcb_root = os.path.join(lcb_root, 'x86')
else:
lcb_root = os.path.join(lcb_root, 'x64')
lcb_root = os.path.join(lcb_root, 'deps')
extoptions['libraries'] = ['libcouchbase']
if debug_symbols:
extoptions['extra_compile_args'] += ['/Zi', '/DEBUG', '/O0']
extoptions['extra_link_args'] += ['/DEBUG', '-debug']
extoptions['library_dirs'] = [os.path.join(lcb_root, 'lib')]
extoptions['include_dirs'] = [os.path.join(lcb_root, 'include')]
extoptions['define_macros'] = [('_CRT_SECURE_NO_WARNINGS', 1)]
pkgdata[couchbase_core] = ['libcouchbase.dll']
extoptions['extra_compile_args'] += [
'-DPYCBC_LCB_API={}'.format(PYCBC_LCB_API)]
extoptions.update(get_sources())
return extoptions, pkgdata
class CBuildInfo:
def __init__(self, cmake_base=None):
self.setbase(cmake_base)
self.cfg = "Release"
self.pkg_data_dir = os.path.join(couchbase_core)
@property
def base(self):
print("self.base is {}".format(self._cmake_base))
return self._cmake_base
def setbase(self, path):
self._cmake_base = (path if isinstance(path, list) else list(
os.path.split(path))) if path else None
print("set base as {}".format(self._cmake_base))
@base.setter
def base(self, path):
self.setbase(path)
def entries(self):
plat = get_plat_code()
print("Got platform {}".format(plat))
default = ['libcouchbase.so.8']
return {'darwin': ['libcouchbase.2.dylib', 'libcouchbase.dylib'], 'linux': default,
'win': ['libcouchbase_d.dll', 'libcouchbase.dll']}.get(get_plat_code(), default)
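    # For example, on macOS (plat code 'darwin') entries() returns
    # ['libcouchbase.2.dylib', 'libcouchbase.dylib']; on Windows ('win') it returns
    # ['libcouchbase_d.dll', 'libcouchbase.dll']; anything else falls back to
    # ['libcouchbase.so.8'].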
def lcb_build_base(self):
print("self.base is {}".format(self.base))
return self._cmake_base + ['install', 'lib']
def lcb_pkgs_srcs(self):
return {'Debug': self.lcb_build_base(
) + ['Debug'], 'Release': self.lcb_build_base() + ['Release']}
def lcb_pkgs(self, cfg):
return map(lambda x: self.lcb_pkgs_srcs()[cfg] + [x], self.entries())
def lcb_pkgs_strlist(self):
print("got pkgs {}".format(self.entries()))
for x in self.entries():
print("yielding binary {} : {}".format(
x, os.path.join(self.pkg_data_dir, x)))
yield os.path.join(self.pkg_data_dir, x)
def get_rpaths(self, cfg):
result = [{'Darwin': '@loader_path', 'Linux': '$ORIGIN'}.get(platform.system(), "$ORIGIN"),
os.path.join(*self.lcb_pkgs_srcs()[cfg])]
print("got rpaths {}".format(result))
return result
def get_lcb_dirs(self):
lcb_dbg_build = os.path.join(
*(self.base + ["install", "lib", "Debug"]))
lcb_build = os.path.join(*(self.base + ["install", "lib", "Release"]))
lib_dirs = [lcb_dbg_build, lcb_build]
return lib_dirs
class LazyCommandClass(dict):
"""
Lazy command class that defers operations requiring given cmdclass until
they've actually been downloaded and installed by setup_requires.
"""
def __init__(self, cmdclass_real):
super(LazyCommandClass, self).__init__()
self.cmdclass_real = cmdclass_real
def __contains__(self, key):
return (
key == 'build_ext'
or super(LazyCommandClass, self).__contains__(key)
)
def __setitem__(self, key, value):
if key == 'build_ext':
raise AssertionError("build_ext overridden!")
super(LazyCommandClass, self).__setitem__(key, value)
def __getitem__(self, key):
if key != 'build_ext':
return super(LazyCommandClass, self).__getitem__(key)
return self.cmdclass_real
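# A sketch of the intended wiring (assumed usage, not taken from setup.py):
#   setup(..., cmdclass=LazyCommandClass(CBuildCommon))
# so that 'build_ext' resolves to the real command class only when it is requested.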
class CBuildCommon(build_ext):
@classmethod
def setup_build_info(cls, extoptions, pkgdata):
cls.info = CBuildInfo()
cls.info.pkgdata = pkgdata
cls.info.pkg_data_dir = os.path.join(
os.path.abspath("."), couchbase_core)
pkgdata['couchbase'] = list(cls.info.lcb_pkgs_strlist())
extoptions['library_dirs'] = [cls.info.pkg_data_dir] + \
extoptions.get('library_dirs', [])
def build_extension(self, ext):
self.init_info_and_rpaths(ext)
self.prep_build(ext)
self.add_inc_and_lib_bundled(ext, self.get_lcb_api_flags())
build_ext.build_extension(self, ext)
def prep_build(self, ext):
pass
def init_info_and_rpaths(self, ext):
self.ssl_config = gen_config.gen_config(
self.build_temp, couchbase_core=couchbase_core)
self.info.setbase(self.build_temp)
self.info.cfg = self.cfg_type()
self.compiler.add_include_dir(os.path.join(
*self.info.base + ["install", "include"]))
self.compiler.add_library_dir(os.path.join(
*self.info.base + ["install", "lib", self.cfg_type()]))
if sys.platform == 'darwin':
warnings.warn('Adding /usr/local to lib search path for OS X')
self.compiler.add_library_dir('/usr/local/lib')
self.compiler.add_include_dir('/usr/local/include')
self.add_rpaths(ext)
def add_rpaths(self, ext=None, extoptions=None):
rpaths = self.info.get_rpaths(self.cfg_type())
if platform.system() != 'Windows':
for rpath in rpaths:
linker_arg = '-Wl,-rpath,' + rpath
ext.runtime_library_dirs = (
ext.runtime_library_dirs if ext.runtime_library_dirs else []) + [rpath]
ext.extra_link_args += [linker_arg]
(extoptions['extra_link_args'] if extoptions else ext.extra_link_args if ext else [
]).insert(0, linker_arg)
def cfg_type(self):
return 'Debug' if self.debug else 'Release'
def copy_binary_to(self, cfg, dest_dir, lib_paths, name):
try:
os.makedirs(dest_dir)
except BaseException:
pass
dest = os.path.join(dest_dir, name)
failures = {}
lib_paths_prioritized = [(k, v)
for k, v in lib_paths.items() if k == cfg]
lib_paths_prioritized += [(k, v)
for k, v in lib_paths.items() if k != cfg]
for rel_type, binary_path in lib_paths_prioritized:
src = os.path.join(*(binary_path + [name]))
try:
if os.path.exists(src):
print("copying {} to {}".format(src, dest))
copyfile(src, dest)
print("success")
except Exception as e:
failures[rel_type] = "copying {} to {}, got {}".format(
src, dest, repr(e))
if len(failures) == len(lib_paths):
raise Exception("Failed to copy binary: {}".format(failures))
def copy_test_file(self, src_file):
'''
Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
        By default, messages like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build.
'''
# Create directory if needed
dest_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'tests', 'bin')
if dest_dir != "" and not os.path.exists(dest_dir):
print("creating directory {}".format(dest_dir))
os.makedirs(dest_dir)
# Copy file
dest_file = os.path.join(dest_dir, os.path.basename(src_file))
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def add_inc_and_lib_bundled(self, ext, lcb_api_flags):
from distutils.ccompiler import CCompiler
ext.extra_compile_args += lcb_api_flags
compiler = self.compiler # type: CCompiler
lcb_include = os.path.join(self.build_temp, "install", "include")
try:
compiler.set_include_dirs([lcb_include] + compiler.include_dirs)
except BaseException:
compiler.add_include_dirs([lcb_include])
lib_dirs = [self.info.pkg_data_dir] + self.info.get_lcb_dirs()
try:
existing_lib_dirs = compiler.library_dirs
compiler.set_library_dirs(lib_dirs + existing_lib_dirs)
except BaseException:
compiler.add_library_dirs(lib_dirs)
def get_pycbc_lcb_api(self):
return os.getenv("PYCBC_LCB_API",
BUILD_CFG.get('comp_options', {}).get('PYCBC_LCB_API', None))
def get_lcb_api_flags(self):
pycbc_lcb_api = self.get_pycbc_lcb_api()
return [
'-DPYCBC_LCB_API={}'.format(pycbc_lcb_api)] if pycbc_lcb_api else []
class install_headers(install_headers_orig):
def run(self):
headers = self.distribution.headers or []
for header in headers:
dst = os.path.join(self.install_dir, os.path.dirname(header))
self.mkpath(dst)
(out, _) = self.copy_file(header, dst)
self.outfiles.append(out)
def get_plat_code():
plat = sys.platform.lower()
substitutions = {'win': r'^win.*$'}
for target, pattern in substitutions.items():
plat = re.compile(pattern).sub(target, plat)
return plat
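# For example, a sys.platform of 'win32' maps to 'win', while 'linux' and 'darwin'
# pass through unchanged.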
build_type = os.getenv("PYCBC_BUILD",
{"Windows": "CMAKE_HYBRID", "Darwin": "CMAKE_HYBRID", "Linux": "CMAKE_HYBRID"}.get(platform.system(),
"CMAKE_HYBRID"))
| 38.534653 | 128 | 0.611254 |
6f6b8aff31251244be971248c18cbe5d5afd56bc | 4,898 | py | Python | Camera GUI/Scaled-Yolo/tk_utils.py | nishantg96/my-scripts | a53708935a57c4cd4a46d4a315cf24b614f20fcb | [
"Apache-2.0"
] | null | null | null | Camera GUI/Scaled-Yolo/tk_utils.py | nishantg96/my-scripts | a53708935a57c4cd4a46d4a315cf24b614f20fcb | [
"Apache-2.0"
] | null | null | null | Camera GUI/Scaled-Yolo/tk_utils.py | nishantg96/my-scripts | a53708935a57c4cd4a46d4a315cf24b614f20fcb | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from PIL import Image,ImageTk
import tkinter as tki
import threading
import cv2,os,sys
from common_utils import *
import pandas as pd
from openpyxl import load_workbook
def rounded(input_list):
return [round(x,1) for x in input_list]
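# e.g. rounded([1.234, 5.678]) == [1.2, 5.7]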
class camera_gui:
def __init__(self, cam1,cam2, out_path):
self.cam1 = cam1
self.cam2 = cam2
self.out_path = out_path
self.frame = None
self.frame2 = None
self.root = tki.Tk()
self.panel = None
self.panel1 = None
self.thread = None
self.stopEvent = None
self.out_list = []
self.data_pd = pd.DataFrame()
self.i , self.i1 = None, None
btn = tki.Button(self.root, text="Save Data!",command=self.save_results)
btn.pack(side="bottom", fill="both", expand="yes", padx=10,pady=10)
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.video_loop, args=())
self.thread.start()
self.root.wm_title("ArUCO Detector!")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
def video_loop(self):
with torch.no_grad():
try:
while not self.stopEvent.is_set():
_,self.frame = self.cam1.read()
_,self.frame2 = self.cam2.read()
self.frame,self.data = detect_object(self.frame)
image = cv2.cvtColor(self.frame,cv2.COLOR_BGR2RGB)
# image,self.data = get_aruco_point(image)
self.i = cv2.cvtColor(image.copy(),cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
self.frame2,self.data1 = detect_object(self.frame2)
image2 = cv2.cvtColor(self.frame2,cv2.COLOR_BGR2RGB)
# image2,self.data1 = get_aruco_point(image2)
# image2 = increase_brightness(image2,50)
self.i1 = cv2.cvtColor(image2.copy(),cv2.COLOR_BGR2RGB)
image2 = Image.fromarray(image2)
image2 = ImageTk.PhotoImage(image2)
self.position,self.orientation = get_pose()
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
self.panel1 = tki.Label(image=image2)
self.panel1.image = image2
self.panel1.pack(side="right", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
self.panel1.configure(image=image2)
self.panel1.image = image2
except RuntimeError as e:
print("[INFO] caught a RuntimeError: ",e)
def save_results(self):
output = [self.position,self.orientation,self.data,self.data1]
name = f"/home/krekik/RICAIP/Images/Camera_0_P{rounded(self.position)}_O{rounded(self.orientation)}_A{self.data}.jpg"
name1 = f"/home/krekik/RICAIP/Images/Camera_1_P{rounded(self.position)}_O{rounded(self.orientation)}_A{self.data1}.jpg"
if (self.data != []):
cv2.imwrite(name,self.i)
if (self.data1 != []):
cv2.imwrite(name1,self.i1)
self.out_list.append(output)
print("Saving results....")
print(f"| ArUCO: {self.data}| ArUCO: {self.data1} | Position: {self.position} | orientation: {self.orientation} |")
self.data_pd = pd.DataFrame(self.out_list, columns =['Position', 'Orientation','ArUCO Data 0','ArUCO Data 1'],)
def onClose(self):
if os.path.isfile('/home/krekik/RICAIP/Results.xlsx'):
path = "/home/krekik/RICAIP/Results.xlsx"
book = load_workbook(path)
writer = pd.ExcelWriter(path, engine='openpyxl')
writer.book = book
writer.sheets = {ws.title: ws for ws in book.worksheets}
self.data_pd.to_excel(writer, startrow=writer.sheets['Sheet1'].max_row, index = False,header= False)
writer.save()
else:
self.data_pd.to_excel('/home/krekik/RICAIP/Results.xlsx',index = False,header=True)
print("Exiting gracefully....")
print("[INFO] closing...")
self.stopEvent.set()
print("[INFO] closing...")
self.cam1.release()
print("[INFO] closing...")
self.cam2.release()
print("[INFO] closing...")
self.root.destroy()
print("[INFO] closed...") | 41.159664 | 127 | 0.558187 |
6b08291f4dbd47fc0057b71a8524e6cbb2cfd7ea | 288 | py | Python | c12/p224_readCensusExcel_test.py | pkingpeng/-python- | f7c3269b6c13edf31449a3f21c3314c27095c984 | [
"Apache-2.0"
] | null | null | null | c12/p224_readCensusExcel_test.py | pkingpeng/-python- | f7c3269b6c13edf31449a3f21c3314c27095c984 | [
"Apache-2.0"
] | null | null | null | c12/p224_readCensusExcel_test.py | pkingpeng/-python- | f7c3269b6c13edf31449a3f21c3314c27095c984 | [
"Apache-2.0"
] | null | null | null | from gererate_python_file.census2010 import allDate as data
print(data['AK']['Anchorage'])
anchoragePoe = data['AK']['Anchorage']['pop']
print('The 2010 population of Anchorage was %s.' % anchoragePoe)
"""
{'pop': 291826, 'tracts': 55}
The 2010 population of Anchorage was 291826.
"""
| 24 | 64 | 0.715278 |
19bb663a42660e5231c354fd058283feba22ce46 | 1,714 | py | Python | task_1/solution_test.py | kristyanYochev/python-course | 52d136179de210bd7edefe3085e50550e3862f62 | [
"MIT"
] | 2 | 2019-12-30T13:26:55.000Z | 2020-01-18T14:03:25.000Z | task_1/solution_test.py | kristyanYochev/python-course | 52d136179de210bd7edefe3085e50550e3862f62 | [
"MIT"
] | 3 | 2019-11-05T16:47:54.000Z | 2020-10-31T18:50:31.000Z | task_1/solution_test.py | kristyanYochev/python-course | 52d136179de210bd7edefe3085e50550e3862f62 | [
"MIT"
] | 24 | 2019-10-10T19:17:40.000Z | 2020-10-25T10:42:00.000Z | import unittest
import solution
class SolutionTest(unittest.TestCase):
def test_accumulate_left(self):
res = solution.accumulate_left(lambda a, b: a / b, 64, [2, 4, 8])
self.assertEqual(1.0, res)
def test_accumulate_left_over_tuple(self):
res = solution.accumulate_left(lambda a, b: a / b, 64, (2, 4, 8))
self.assertEqual(1.0, res)
def test_accumulate_left_list(self):
res = solution.accumulate_left(
lambda a, b: a + b, [], [[1, 2, 3], [4, 5, 6]])
self.assertEqual([1, 2, 3, 4, 5, 6], res)
def test_accumulate_left_over_empty_list(self):
res = solution.accumulate_left(lambda a, b: a / b, 8, [])
self.assertEqual(8, res)
def test_accumulate_left_over_empty_tuple(self):
res = solution.accumulate_left(lambda a, b: a / b, 8, ())
self.assertEqual(8, res)
def test_accumulate_right(self):
res = solution.accumulate_right(lambda a, b: a / b, 8, [16, 32, 64])
self.assertEqual(4.0, res)
def test_accumulate_right_over_tuple(self):
res = solution.accumulate_right(lambda a, b: a / b, 8, (16, 32, 64))
self.assertEqual(4.0, res)
def test_accumulate_right_list(self):
res = solution.accumulate_right(lambda a, b: a + b, [], [[1, 2], [3, 4]])
self.assertEqual([1, 2, 3, 4], res)
def test_accumulate_right_over_empty_list(self):
res = solution.accumulate_right(lambda a, b: a / b, 8, [])
self.assertEqual(8, res)
def test_accumulate_righ_over_empty_tuple(self):
res = solution.accumulate_right(lambda a, b: a / b, 8, ())
self.assertEqual(8, res)
if __name__ == "__main__":
unittest.main()
| 33.607843 | 81 | 0.623104 |
5c9b8a15733efdb446cb67a949dcc4732500de5c | 2,336 | py | Python | spatial-hash/spatial_hash.py | Sopheria/tools | 45cf766553ade086419df884e8259605f5fdef81 | [
"Unlicense"
] | null | null | null | spatial-hash/spatial_hash.py | Sopheria/tools | 45cf766553ade086419df884e8259605f5fdef81 | [
"Unlicense"
] | null | null | null | spatial-hash/spatial_hash.py | Sopheria/tools | 45cf766553ade086419df884e8259605f5fdef81 | [
"Unlicense"
] | null | null | null | # simple script that reads a file containing a set of points and determines how tiles at those points would be sorted into a 2D spatial hash.
# The file it reads from is tiles.txt, and must contain a set of newline-delimited 2D points in (x,y) format. The output is written to tile_hash.txt.
# This file will be overwritten on each run.
def addToListIgnoringDuplicates(item, list):
if item not in list:
list.append(item)
def addToHistogram(key, histogram):
if key not in histogram:
histogram[key] = 1
else:
histogram[key] += 1
readfile = open("tiles.txt", "r")
writefile = open("tile_hash.txt", "w")
tilewidth = 32
tileheight = 32
mapwidth = 3200
mapheight = 3200
numhashcols = 8
numhashrows = 8
bucketwidth = mapwidth/numhashcols
bucketheight = mapheight/numhashrows
histogram = {}
for line in readfile:
point = eval(line)
hashBuckets = []
hashId = ((point[0]*tilewidth)/bucketwidth) + numhashcols*((point[1]*tileheight)/bucketheight)
x = (point[0]*tilewidth)/bucketwidth
y = (point[1]*tileheight)/bucketheight
yfinal = numhashcols*y
addToListIgnoringDuplicates(hashId, hashBuckets)
hashId = ((point[0]*tilewidth + tilewidth)/bucketwidth) + numhashcols*((point[1]*tileheight)/bucketheight)
addToListIgnoringDuplicates(hashId, hashBuckets)
x = (point[0]*tilewidth + tilewidth)/bucketwidth
y = (point[1]*tileheight)/bucketheight
yfinal = numhashcols*y
hashId = ((point[0]*tilewidth + tilewidth)/bucketwidth) + numhashcols*((point[1]*tileheight + tileheight)/bucketheight)
addToListIgnoringDuplicates(hashId, hashBuckets)
x = (point[0]*tilewidth + tilewidth)/bucketwidth
y = (point[1]*tileheight + tileheight)/bucketheight
yfinal = numhashcols*y
hashId = ((point[0]*tilewidth)/bucketwidth) + numhashcols*((point[1]*tileheight + tileheight)/bucketheight)
addToListIgnoringDuplicates(hashId, hashBuckets)
x = (point[0]*tilewidth)/bucketwidth
y = (point[1]*tileheight + tileheight)/bucketheight
yfinal = numhashcols*y
for bucket in hashBuckets:
addToHistogram(bucket, histogram)
writefile.write(str(point))
writefile.write(" -> ");
writefile.write(str(hashBuckets))
writefile.write("\n")
writefile.write("\n")
for key in histogram:
writefile.write(str(key))
writefile.write(":\t")
writefile.write(str(histogram[key]))
writefile.write("\n")
readfile.close()
writefile.close()
| 28.144578 | 149 | 0.743579 |
fbd40893504f2db1d999a39b3850830bcee2bc45 | 1,930 | py | Python | examples/rasterio_polygonize.py | rouault/rasterio | 0b101b0414a575b263dcebefb0775b672f07cdeb | [
"BSD-3-Clause"
] | null | null | null | examples/rasterio_polygonize.py | rouault/rasterio | 0b101b0414a575b263dcebefb0775b672f07cdeb | [
"BSD-3-Clause"
] | null | null | null | examples/rasterio_polygonize.py | rouault/rasterio | 0b101b0414a575b263dcebefb0775b672f07cdeb | [
"BSD-3-Clause"
] | 1 | 2017-10-16T12:50:16.000Z | 2017-10-16T12:50:16.000Z | # Emulates GDAL's gdal_polygonize.py
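# Example invocation (hypothetical paths):
#   python rasterio_polygonize.py input.tif shapes.shp --output-driver "ESRI Shapefile" --mask-value 0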
import argparse
import logging
import subprocess
import sys
import fiona
import numpy as np
import rasterio
from rasterio.features import shapes
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger('rasterio_polygonize')
def main(raster_file, vector_file, driver, mask_value):
with rasterio.drivers():
with rasterio.open(raster_file) as src:
image = src.read(1)
if mask_value is not None:
mask = image == mask_value
else:
mask = None
results = (
{'properties': {'raster_val': v}, 'geometry': s}
for i, (s, v)
in enumerate(
shapes(image, mask=mask, transform=src.affine)))
with fiona.open(
vector_file, 'w',
driver=driver,
crs=src.crs,
schema={'properties': [('raster_val', 'int')],
'geometry': 'Polygon'}) as dst:
dst.writerecords(results)
return dst.name
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Writes shapes of raster features to a vector file")
parser.add_argument(
'input',
metavar='INPUT',
help="Input file name")
parser.add_argument(
'output',
metavar='OUTPUT',
help="Output file name")
parser.add_argument(
'--output-driver',
metavar='OUTPUT DRIVER',
help="Output vector driver name")
parser.add_argument(
'--mask-value',
default=None,
type=int,
metavar='MASK VALUE',
help="Value to mask")
args = parser.parse_args()
name = main(args.input, args.output, args.output_driver, args.mask_value)
    print(subprocess.check_output(
        ['ogrinfo', '-so', args.output, name]))
| 25.733333 | 77 | 0.57513 |
b441e308c9c0fc3b17c2ea24a3d2a2f9c1941d20 | 6,320 | py | Python | tunnelling/tunnelling.py | gry/tunnelling | b3234284ac952d0c3b131ae884c4e8f82cc6d9aa | [
"MIT"
] | null | null | null | tunnelling/tunnelling.py | gry/tunnelling | b3234284ac952d0c3b131ae884c4e8f82cc6d9aa | [
"MIT"
] | null | null | null | tunnelling/tunnelling.py | gry/tunnelling | b3234284ac952d0c3b131ae884c4e8f82cc6d9aa | [
"MIT"
] | 2 | 2017-02-11T17:10:18.000Z | 2021-01-29T22:45:56.000Z | #!/usr/bin/env python
"""
Tunnelling is a SSH tunnelling library (useful when you need to do tunnels inside other python programs)
"""
import select
import SocketServer
import paramiko
from threading import Thread, Event
class ForwardServer(SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
class Handler (SocketServer.BaseRequestHandler):
def handle(self):
try:
chan = self.ssh_transport.open_channel('direct-tcpip', (self.chain_host, self.chain_port), self.request.getpeername())
except Exception, e:
#print('Incoming request to %s:%d failed: %s' % (self.chain_host, self.chain_port, repr(e)))
return
if chan is None:
print('Incoming request to %s:%d was rejected by the SSH server.' % (self.chain_host, self.chain_port))
return
#print('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(), chan.getpeername(), (self.chain_host, self.chain_port)))
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
chan.close()
self.request.close()
#print('Tunnel closed from %r' % (self.request,))
#print('Tunnel closed from %r' % (self.request.getpeername(),))
class Tunnel():
def __init__(self, ssh_client, local_port, remote_host, remote_port):
self.c = ssh_client
self.trans = self.c.get_transport()
self.local_port = local_port
self.remote_host = remote_host
self.remote_port = remote_port
def startTunnel(self):
class SubHandler(Handler):
chain_host = self.remote_host
chain_port = self.remote_port
ssh_transport = self.c.get_transport()
my_signal = Event()
my_signal.clear()
def ThreadTunnel():
self.t = ForwardServer(('127.0.0.1', self.local_port), SubHandler)
my_signal.set()
self.t.serve_forever()
Thread(target=ThreadTunnel).start()
my_signal.wait()
def stopTunnel(self):
self.t.shutdown()
#self.trans.close()
#self.c.close()
self.t.socket.close()
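# A minimal sketch of driving Tunnel directly (hostnames and ports are placeholders):
#   client = paramiko.SSHClient()
#   client.set_missing_host_key_policy(paramiko.WarningPolicy())
#   client.connect('gateway.example.com', 22, username='user')
#   tun = Tunnel(client, 8080, 'internal-host', 80)
#   tun.startTunnel()  # localhost:8080 now forwards to internal-host:80 via the gateway
#   tun.stopTunnel()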
class PortForwarder(object):
"""
    Creates a connection to a server and port and handles all the port forwarding jobs.
forward_list = List( (String) Local Port, (String) Address, (String) Remote Port)
    self.start() opens the connection and tunnels; self.stop() closes them.
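    Example (placeholder values):
        pfw = PortForwarder('gateway.example.com', 22, 'user',
                            [['8080', 'internal-host', '80']])
        pfw.start()  # prints 'Tunnel active: ...' for each entry
        pfw.stop()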
"""
def __init__(self, server, port, username, forward_list, key_filename=None, password=None):
self.client = None
self.server = server
self.port = port
self.username = username
self.forward_list = forward_list
self.key_filename = key_filename
self.password = password
self.look_for_keys = True if self.key_filename else False
def start(self):
self.client = paramiko.SSHClient()
self.client.load_system_host_keys()
self.client.set_missing_host_key_policy(paramiko.WarningPolicy())
self.client.connect(self.server, self.port, username=self.username, key_filename=self.key_filename,
look_for_keys=self.look_for_keys, password=self.password)
self.t_list = []
for idx, (lport, rhost, rport) in enumerate(self.forward_list):
tun = Tunnel(self.client, int(lport), rhost, int(rport))
tun.startTunnel()
self.t_list.append(tun)
lport = tun.t.socket.getsockname()[1]
print 'Tunnel active: %s:%s:%s' %(lport, rhost, rport)
self.forward_list[idx][0] = lport
def stop(self):
for t in self.t_list:
t.stopTunnel()
self.client.close()
def main():
import argparse
def getArguments():
"""Argparse configuration and parsing
Returns: arguments parsed
"""
argparser = argparse.ArgumentParser(description='PyTunnel Forwarder')
argparser.add_argument('server',
metavar='<server>',
help='Server Address')
argparser.add_argument('-p','--port',
dest='port',
type=int,
default=22,
metavar='<port>',
help='Server Port')
argparser.add_argument('-u','--user',
dest='user',
# default='root',
metavar='<user>',
help='user')
argparser.add_argument('-k','--key',
dest='key',
metavar='<key>',
help='Key Filename')
argparser.add_argument('-P','--Password',
dest='password',
metavar='<password>',
help='Password')
argparser.add_argument('forward_list',
nargs='+',
metavar='<port:host:hostport>',
help='List of forward tunnels')
args = argparser.parse_args()
return args
args = getArguments()
if len(args.server.split('@')) == 2:
server = args.server.split('@')[1]
user = args.server.split('@')[0] if not args.user else args.user
else:
server = args.server
user = args.user
forward_list = [fw.split(':') for fw in args.forward_list]
pfw = PortForwarder(server, args.port, user, forward_list, key_filename=args.key, password=args.password)
pfw.start()
try:
while True:
pass
except KeyboardInterrupt:
pfw.stop()
exit(0)
if __name__=='__main__':
main()
| 34.162162 | 143 | 0.541614 |