repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (15 classes) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
openbig/odoo-contract | partner_billing/wizard/sale_make_invoice_advance.py | 1 | 1615 | # -*- encoding: utf-8 -*-
##############################################################################
#
# partner_billing
# (C) 2015 Mikołaj Dziurzyński, Grzegorz Grzelak, Thorsten Vocks (big-consulting GmbH)
# All Rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp import fields, models
import logging
_logger = logging.getLogger(__name__)
class sale_advance_payment_inv(osv.osv_memory):
_inherit = "sale.advance.payment.inv"
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
res = super(sale_advance_payment_inv,self)._prepare_advance_invoice_vals(cr, uid, ids, context=context)
sale_order_obj = self.pool.get('sale.order')
for pair in res:
for sale in sale_order_obj.browse(cr, uid, [pair[0]]):
pair[1]['associated_partner'] = sale.associated_partner and sale.associated_partner.id or False
return res
| agpl-3.0 | -6,439,668,196,751,878,000 | 39.325 | 105 | 0.651581 | false |
lipixun/pytest | rabbitmq/deadchannel/going2dead.py | 1 | 2112 | #!/usr/bin/env python
# encoding=utf8
# The dead channel application
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from uuid import uuid4
from time import time, sleep
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message
class Client(object):
"""The RPC Client
"""
def __init__(self, host, port, vhost, user, password):
"""Create a new Server
"""
self._conn = RabbitConnection(host = host, port = port, vhost = vhost, user = user, password = password)
self._channel = self._conn.channel()
result = self._channel.queue.declare(arguments = { 'x-dead-letter-exchange': 'amq.topic', 'x-dead-letter-routing-key': 'test.dead_channel' })
self._deadQueue = result[0]
# Send a message
self._channel.basic.publish(Message('OMG! I\'m dead!'), '', self._deadQueue)
def dead(self):
"""Normal dead
"""
self._channel.close()
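# Illustrative companion sketch (not part of the original script): a consumer
# for the other side of the dead-letter setup above. Messages dead-lettered
# from the queue declared in Client are republished to the 'amq.topic'
# exchange with routing key 'test.dead_channel', so binding a fresh queue to
# that pair should surface them. The exact haigha bind/consume signatures are
# assumed here, not taken from this script.
def consume_dead_letters(host, port, vhost, user, password):
    """Watch amq.topic for messages dead-lettered by the Client above"""
    conn = RabbitConnection(host = host, port = port, vhost = vhost, user = user, password = password)
    channel = conn.channel()
    # A server-named queue just for this consumer
    queue = channel.queue.declare(exclusive = True)[0]
    channel.queue.bind(queue, 'amq.topic', 'test.dead_channel')
    def onDeadLetter(message):
        print 'Got dead-lettered message: %s' % message.body
    channel.basic.consume(queue, onDeadLetter, no_ack = True)
    while True:
        # Pump the connection so deliveries reach onDeadLetter
        conn.read_frames()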
if __name__ == '__main__':
from argparse import ArgumentParser
def getArguments():
"""Get arguments
"""
parser = ArgumentParser(description = 'RabbitMQ dead channel client')
parser.add_argument('--host', dest = 'host', required = True, help = 'The host')
parser.add_argument('--port', dest = 'port', default = 5672, type = int, help = 'The port')
parser.add_argument('--vhost', dest = 'vhost', default = '/test', help = 'The virtual host')
parser.add_argument('--user', dest = 'user', default = 'test', help = 'The user name')
parser.add_argument('--password', dest = 'password', default = 'test', help = 'The password')
# Done
return parser.parse_args()
def main():
"""The main entry
"""
args = getArguments()
# Create the server
client = Client(args.host, args.port, args.vhost, args.user, args.password)
# Go to dead
print 'Will go to dead in 10s, or you can use ctrl + c to cause an unexpected death'
sleep(10)
client.dead()
print 'Normal dead'
main()
| gpl-2.0 | -8,202,055,047,594,408,000 | 33.064516 | 149 | 0.606061 | false |
gf712/AbPyTools | abpytools/core/fab_collection.py | 1 | 14123 | from .chain_collection import ChainCollection
import numpy as np
import pandas as pd
from .chain import calculate_charge
from abpytools.utils import DataLoader
from operator import itemgetter
from .fab import Fab
from .helper_functions import germline_identity_pd, to_numbering_table
from .base import CollectionBase
import os
import json
from .utils import (json_FabCollection_formatter, pb2_FabCollection_formatter, pb2_FabCollection_parser,
json_FabCollection_parser)
from .flags import *
if BACKEND_FLAGS.HAS_PROTO:
from abpytools.core.formats import FabCollectionProto
class FabCollection(CollectionBase):
def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):
"""
Fab object container that handles combinations of light/heavy Chain pairs.
Args:
fab (list):
heavy_chains (ChainCollection):
light_chains (ChainCollection):
names (list):
"""
# check if it's a Chain object
if heavy_chains is None and light_chains is None and fab is None:
raise ValueError('Provide a list of Chain objects or a ChainCollection object')
# check if fab object is a list and if all object are abpytools.Fab objects
if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):
self._fab = fab
self._light_chains = ChainCollection([x[0] for x in self._fab])
self._heavy_chains = ChainCollection([x[1] for x in self._fab])
if fab is None and (heavy_chains is not None and light_chains is not None):
if isinstance(heavy_chains, list):
self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)
elif isinstance(heavy_chains, ChainCollection):
self._heavy_chains = heavy_chains
else:
raise ValueError('Provide a list of Chain objects or a ChainCollection object')
if isinstance(light_chains, list):
self._light_chains = ChainCollection(antibody_objects=light_chains)
elif isinstance(light_chains, ChainCollection):
self._light_chains = light_chains
else:
raise ValueError('Provide a list of Chain objects or a ChainCollection object')
if len(self._light_chains.loading_status()) == 0:
self._light_chains.load()
if len(self._heavy_chains.loading_status()) == 0:
self._heavy_chains.load()
if self._light_chains.n_ab != self._heavy_chains.n_ab:
raise ValueError('Number of heavy chains must be the same as the number of light chains')
if isinstance(names, list) and all(isinstance(name, str) for name in names):
if len(names) == self._heavy_chains.n_ab:
self._names = names
else:
raise ValueError(
'Length of name list must be the same as length of heavy_chains/light chains lists')
elif names is None:
self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,
self._light_chains.names)]
else:
raise ValueError("Names expected a list of strings, instead got {}".format(type(names)))
self._n_ab = self._light_chains.n_ab
self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences,
self._light_chains.sequences)]
# keep the name of the heavy and light chains internally to keep everything in the right order
self._internal_heavy_name = self._heavy_chains.names
self._internal_light_name = self._light_chains.names
# even though it makes more sense to draw all these values from the base Fab objects this is much slower
# whenever self._n_ab > 1 it makes more sense to use the self._heavy_chain and self._light_chain containers
# in all the methods
# in essence the abpytools.Fab object is just a representative building block that could in future just
# cache data and would then represent a speed up in the calculations
def molecular_weights(self, monoisotopic=False):
return [heavy + light for heavy, light in zip(self._heavy_chains.molecular_weights(monoisotopic=monoisotopic),
self._light_chains.molecular_weights(monoisotopic=monoisotopic))]
def extinction_coefficients(self, extinction_coefficient_database='Standard', reduced=False, normalise=False,
**kwargs):
heavy_ec = self._heavy_chains.extinction_coefficients(
extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced)
light_ec = self._light_chains.extinction_coefficients(
extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced)
if normalise:
return [(heavy + light) / mw for heavy, light, mw in
zip(heavy_ec, light_ec, self.molecular_weights(**kwargs))]
else:
return [heavy + light for heavy, light in zip(heavy_ec, light_ec)]
def hydrophobicity_matrix(self):
return np.column_stack((self._heavy_chains.hydrophobicity_matrix(), self._light_chains.hydrophobicity_matrix()))
def charge(self):
return np.column_stack((self._heavy_chains.charge, self._light_chains.charge))
def total_charge(self, ph=7.4, pka_database='Wikipedia'):
available_pi_databases = ["EMBOSS", "DTASetect", "Solomon", "Sillero", "Rodwell", "Wikipedia", "Lehninger",
"Grimsley"]
assert pka_database in available_pi_databases, \
"Selected pI database {} not available. Available databases: {}".format(pka_database,
', '.join(available_pi_databases))
data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database])
pka_data = data_loader.get_data()
return [calculate_charge(sequence=seq, ph=ph, pka_values=pka_data) for seq in self.sequences]
def igblast_local_query(self, file_path, chain):
if chain.lower() == 'light':
self._light_chains.igblast_local_query(file_path=file_path)
elif chain.lower() == 'heavy':
self._heavy_chains.igblast_local_query(file_path=file_path)
else:
raise ValueError('Specify if the data being loaded is for the heavy or light chain')
def igblast_server_query(self, **kwargs):
self._light_chains.igblast_server_query(**kwargs)
self._heavy_chains.igblast_server_query(**kwargs)
def numbering_table(self, as_array=False, region='all', chain='both', **kwargs):
return to_numbering_table(as_array=as_array, region=region, chain=chain,
heavy_chains_numbering_table=self._heavy_chains.numbering_table,
light_chains_numbering_table=self._light_chains.numbering_table,
names=self.names, **kwargs)
def _germline_pd(self):
# empty dictionaries return false, so this condition checks if any of the values are False
if all([x for x in self._light_chains.germline_identity.values()]) is False:
# this means there is no information about the germline,
# by default it will run a web query
self._light_chains.igblast_server_query()
if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
self._heavy_chains.igblast_server_query()
heavy_chain_germlines = self._heavy_chains.germline
light_chain_germlines = self._light_chains.germline
data = np.array([[heavy_chain_germlines[x][0] for x in self._internal_heavy_name],
[heavy_chain_germlines[x][1] for x in self._internal_heavy_name],
[light_chain_germlines[x][0] for x in self._internal_light_name],
[light_chain_germlines[x][1] for x in self._internal_light_name]]).T
df = pd.DataFrame(data=data,
columns=pd.MultiIndex.from_tuples([('Heavy', 'Assignment'),
('Heavy', 'Score'),
('Light', 'Assignment'),
('Light', 'Score')]),
index=self.names)
df.loc[:, (slice(None), 'Score')] = df.loc[:, (slice(None), 'Score')].apply(pd.to_numeric)
return df
def save_to_json(self, path, update=True):
with open(os.path.join(path + '.json'), 'w') as f:
fab_data = json_FabCollection_formatter(self)
json.dump(fab_data, f, indent=2)
def save_to_pb2(self, path, update=True):
proto_parser = FabCollectionProto()
try:
with open(os.path.join(path + '.pb2'), 'rb') as f:
proto_parser.ParseFromString(f.read())
except IOError:
# Creating new file
pass
pb2_FabCollection_formatter(self, proto_parser)
with open(os.path.join(path + '.pb2'), 'wb') as f:
f.write(proto_parser.SerializeToString())
def save_to_fasta(self, path, update=True):
raise NotImplementedError
@classmethod
def load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'r') as f:
data = json.load(f)
fab_objects = json_FabCollection_parser(data)
fab_collection = cls(fab=fab_objects)
return fab_collection
@classmethod
def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'rb') as f:
proto_parser = FabCollectionProto()
proto_parser.ParseFromString(f.read())
fab_objects = pb2_FabCollection_parser(proto_parser)
fab_collection = cls(fab=fab_objects)
return fab_collection
@classmethod
def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA, n_threads=20,
verbose=True, show_progressbar=True):
raise NotImplementedError
def _get_names_iter(self, chain='both'):
if chain == 'both':
for light_chain, heavy_chain in zip(self._light_chains, self._heavy_chains):
yield f"{light_chain.name}-{heavy_chain.name}"
elif chain == 'light':
for light_chain in self._light_chains:
yield light_chain.name
elif chain == 'heavy':
for heavy_chain in self._heavy_chains:
yield heavy_chain.name
else:
raise ValueError(f"Unknown chain type ({chain}), available options are:"
f"both, light or heavy.")
@property
def regions(self):
heavy_regions = self._heavy_chains.ab_region_index()
light_regions = self._light_chains.ab_region_index()
return {name: {CHAIN_FLAGS.HEAVY_CHAIN: heavy_regions[heavy],
CHAIN_FLAGS.LIGHT_CHAIN: light_regions[light]} for name, heavy, light in
zip(self.names, self._internal_heavy_name, self._internal_light_name)}
@property
def names(self):
return self._names
@property
def sequences(self):
return self._pair_sequences
@property
def aligned_sequences(self):
return [heavy + light for light, heavy in
zip(self._heavy_chains.aligned_sequences,
self._light_chains.aligned_sequences)]
@property
def n_ab(self):
return self._n_ab
@property
def germline_identity(self):
return self._germline_identity()
@property
def germline(self):
return self._germline_pd()
def _string_summary_basic(self):
return "abpytools.FabCollection Number of sequences: {}".format(self._n_ab)
def __len__(self):
return self._n_ab
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def __getitem__(self, indices):
if isinstance(indices, int):
return Fab(heavy_chain=self._heavy_chains[indices],
light_chain=self._light_chains[indices],
name=self.names[indices], load=False)
else:
return FabCollection(heavy_chains=list(itemgetter(*indices)(self._heavy_chains)),
light_chains=list(itemgetter(*indices)(self._light_chains)),
names=list(itemgetter(*indices)(self._names)))
def _germline_identity(self):
# empty dictionaries return false, so this condition checks if any of the values are False
if all([x for x in self._light_chains.germline_identity.values()]) is False:
# this means there is no information about the germline,
# by default it will run a web query
self._light_chains.igblast_server_query()
if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
self._heavy_chains.igblast_server_query()
return germline_identity_pd(self._heavy_chains.germline_identity,
self._light_chains.germline_identity,
self._internal_heavy_name,
self._internal_light_name,
self._names)
def get_object(self, name):
"""
:param name: str
:return:
"""
if name in self.names:
index = self.names.index(name)
return self[index]
else:
raise ValueError('Could not find sequence with specified name')
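# Hedged usage sketch (not part of the abpytools API itself): pairing two
# pre-loaded, equally sized ChainCollection objects into a FabCollection and
# reading back a couple of derived properties. How the two ChainCollection
# inputs are loaded is assumed to happen elsewhere.
def _example_fab_collection(heavy_chains, light_chains):
    """Pair pre-loaded heavy/light ChainCollection objects and inspect them."""
    fabs = FabCollection(heavy_chains=heavy_chains, light_chains=light_chains)
    # Each Fab weight is the sum of its heavy and light chain weights
    weights = fabs.molecular_weights(monoisotopic=False)
    print(f"{fabs.n_ab} Fabs, first molecular weight: {weights[0]:.2f}")
    return fabs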
| mit | -4,991,626,911,150,680,000 | 41.158209 | 120 | 0.593075 | false |
jwill89/clifford-discord-bot | source/retired/main.py | 1 | 31345 | import discord
from discord.ext import commands
import random
import MySQLdb
# ********************************************** #
# DEFINITIONS ********************************** #
# ********************************************** #
# Bot Description
description = '''Official Zealot Gaming Discord bot!'''
# Define Bot
bot = commands.Bot(command_prefix='!', description='Official Zealot Gaming Discord Bot')
# Define MySQL DB and Cursor Object
db = MySQLdb.connect(host="localhost",
user="discord_secure",
passwd="password-here",
db="discord")
# ********************************************** #
# FUNCTIONS ************************************ #
# ********************************************** #
# Check for Game Abbreviations
def is_game_abv(game_abv: str):
try:
sql = "SELECT 1 FROM games WHERE `abv` = %s LIMIT 1"
cur = db.cursor()
result = cur.execute(sql, (game_abv,))
cur.close()
except Exception as e:
print('Exception: ' + str(e))
result = 0
# If we got a result, true, else false
return result == 1
# Check for Game Names
def is_game_name(game_name: str):
try:
sql = "SELECT 1 FROM games WHERE `name` = %s LIMIT 1"
cur = db.cursor()
result = cur.execute(sql, (game_name,))
cur.close()
except Exception as e:
print('Exception: ' + str(e))
result = 0
# If we got a result, true, else false
return result == 1
# Check for Staff Member Status
def is_staff(member: discord.Member):
# Return True or False if User is a Staff Member
return 'Staff' in [r.name for r in member.roles]
# ********************************************** #
# BOT EVENTS *********************************** #
# ********************************************** #
# Bot Start Event
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
await bot.change_presence(game=discord.Game(name='Zealot Gaming'))
# Welcome Message
@bot.event
async def on_member_join(member):
channel = bot.get_channel('108369515502411776')
fmt = "Everyone welcome {0.mention} to Zealot Gaming! Have a great time here! :wink: " \
"http://puu.sh/nG6Qe.wav".format(member)
await bot.send_message(channel, fmt)
# Goodbye Message
@bot.event
async def on_member_remove(member):
channel = bot.get_channel('108369515502411776')
fmt = ":wave: Goodbye {0}, we're sad to see you go!".format(member.name)
await bot.send_message(channel, fmt)
# ********************************************** #
# UN-GROUPED BOT COMMANDS ********************** #
# ********************************************** #
# COMMAND: !hello
@bot.command(pass_context=True)
async def hello(ctx):
# we do not want the bot to reply to itself
if ctx.message.author == bot.user:
return
else:
msg = 'Hello {0.message.author.mention}'.format(ctx)
await bot.send_message(ctx.message.channel, msg)
# COMMAND: !carlito
@bot.command()
async def carlito():
"""The legendary message of Carlito, maz00's personal cabana boy."""
await bot.say("wew men :ok_hand::skin-tone-1: that's some good shit:100: some good shit :100: that's some good shit"
" right there :100: :ok_hand::skin-tone-1: right there :ok_hand::skin-tone-1: :100: sign me the FUCK "
"up:100: :100: :ok_hand::skin-tone-1: :eggplant:")
# COMMAND: !eightball
@bot.command(pass_context=True)
async def eightball(ctx, question: str):
"""Rolls a magic 8-ball to answer any question you have."""
if question is None:
await bot.say('{0.message.author.mention}, you did not ask a question.'.format(ctx))
return
# Answers List (Classic 8-Ball, 20 Answers)
answers = ['It is certain.',
'It is decidedly so.',
'Without a doubt.',
'Yes, definitely.',
'You may rely on it.',
'As I see it, yes.',
'Most likely.',
'Outlook good.',
'Yes.',
'Signs point to yes.',
'Reply hazy; try again.',
'Ask again later.',
'Better not tell you now.',
'Cannot predict now.',
'Concentrate, then ask again.',
'Do not count on it.',
'My reply is no.',
'My sources say no.',
'Outlook not so good.',
'Very doubtful.']
# Send the Answer
await bot.say('{0.message.author.mention}, '.format(ctx) + random.choice(answers))
# COMMAND: !roll
@bot.command()
async def roll(dice: str):
"""Rolls a dice in NdN format."""
try:
rolls, limit = map(int, dice.split('d'))
except Exception:
await bot.say('Format has to be in NdN!')
return
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
await bot.say(result)
# COMMAND: !choose
@bot.command()
async def choose(*choices: str):
"""Chooses between multiple choices."""
await bot.say(random.choice(choices))
# COMMAND: !joined
@bot.command()
async def joined(member: discord.Member):
"""Says when a member joined."""
await bot.say('{0.name} joined in {0.joined_at}'.format(member))
# COMMAND: !get_roles
@bot.command()
async def get_roles(member: discord.Member):
"""Lists a User's Roles"""
total = 0
role_list = ''
for role in member.roles:
if total > 0:
role_list += ', '
role_list += str(role)
total += 1
await bot.say('{0.name} is a member of these roles: '.format(member) + role_list)
# COMMAND: !get_channel_id
@bot.command(pass_context=True)
async def get_channel_id(ctx):
"""Lists the ID of the channel the message is sent in."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
await bot.say('Channel ID is {0.id}'.format(ctx.message.channel))
# COMMAND: !join
@bot.command(pass_context=True)
async def join(ctx, *, role_name: str):
"""Allows a user to join a public group."""
# List of Allowed Public Roles
allowed_roles = ['Europe',
'North America',
'Oceania',
'Overwatch',
'League of Legends',
'Co-op',
'Minna-chan']
if role_name not in allowed_roles:
await bot.say('{0.mention}, you may only join allowed public groups.'.format(ctx.message.author))
return
# Define role, then add role to member.
try:
role = discord.utils.get(ctx.message.server.roles, name=role_name)
await bot.add_roles(ctx.message.author, role)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the roster for you. "
"I'm sorry! : ".format(ctx.message.author) + str(e))
return
# Success Message
await bot.say('{0.mention}, you have successfully been added to the group **{1}**.'
.format(ctx.message.author, role_name))
# ********************************************** #
# GROUPED COMMANDS : EVENTS ******************** #
# ********************************************** #
# COMMAND: !events
@bot.group(pass_context=True)
async def events(ctx):
"""Manage events and attendance!"""
if ctx.invoked_subcommand is None:
await bot.say('Invalid command passed. Must be *add*, *description*, *edit*, *register*, or *remove*.')
# COMMAND: !events add
@events.command(name='add', pass_context=True)
async def events_add(ctx, date: str, time: str, *, title: str):
"""Add an event to the Events List!
Date **must** be in YYYY/MM/DD format. Time **must** be in UTC."""
# Set #events Channel
event_channel = bot.get_channel('296694692135829504')
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Make sure we have a date.
if date is None:
await bot.say('Error: You must enter a date in YYYY/MM/DD format.')
return
# Make sure we have a time.
if time is None:
await bot.say('Error: You must enter a time in HH:MM format in UTC timezone.')
return
# Make sure we have a title.
if title is None:
await bot.say('Error: You must enter a title for the event.')
return
# Add Event to Database
try:
sql = "INSERT INTO events (`date`,`time`,`title`) VALUES (%s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (date, time, title))
event_id = cur.lastrowid
msg_text = "**Title**: {0} \n**Event ID**: {1} \n**Date & Time**: {2} at {3} (UTC)"
# Add Message to Events Channel and Save Message ID
message = await bot.send_message(event_channel, msg_text.format(title, event_id, date, time))
cur.execute('UPDATE events SET `message_id` = %s WHERE `event_id` = %s', (message.id, event_id))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding the event to the list. '.format(ctx.message.author)
+ str(e))
return
# Success Message
await bot.say('{0.mention}, your event was successfully added. The event ID is: {1}.'
.format(ctx.message.author, event_id))
# COMMAND: !events description
@events.command(name='description', pass_context=True)
async def events_description(ctx, event_id: int, *, desc: str):
"""Adds a Description to an Event Given an Event ID."""
# EVENT CHANNEL ID: 296694692135829504
event_channel = bot.get_channel('296694692135829504')
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Make sure we have an event ID.
if event_id is None:
await bot.say('Error: You must enter an event ID. Check the #events channel.')
return
# Make sure we have a description.
if desc is None:
await bot.say('Error: You must enter a description.')
return
try:
sql = "UPDATE events SET `description` = %s WHERE `event_id` = %s"
cur = db.cursor()
cur.execute(sql, (desc, event_id))
cur.execute("SELECT `message_id` FROM events WHERE `event_id` = %s", (event_id,))
msg_id = cur.fetchone()
message = await bot.get_message(event_channel, msg_id[0])
msg_text = message.content + " \n**Description**: {0}".format(desc)
# Update Message in Events Channel with Description
await bot.edit_message(message, msg_text)
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding a description to the event. '.format(ctx.message.author)
+ str(e))
return
# Success Message
await bot.say('{0.mention}, the event was successfully updated with a description.'.format(ctx.message.author))
# ********************************************** #
# GROUPED COMMANDS : GAMES ********************* #
# ********************************************** #
# COMMAND: !games
@bot.group(pass_context=True)
async def games(ctx):
"""Manages games for the roster."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid command passed. Must be *add*, *edit*, *list*, or *remove*.')
# COMMAND: !games add
@games.command(name='add', pass_context=True)
async def games_add(ctx, game_abv: str, *, game_name: str):
"""Adds a game to the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Does Game Abbreviation Exist?
if is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation is already in use.'.format(ctx.message.author))
return
# Does Game Name Exist?
if is_game_name(game_name):
await bot.say('{0.mention}, this game is already in the list.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO games (`abv`,`name`) VALUES (%s, %s)"
cur = db.cursor()
cur.execute(sql, (game_abv, game_name))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding the game to the games list. '.format(ctx.message.author)
+ str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully added to the games list!'.format(ctx.message.author))
# COMMAND: !games edit
@games.command(name='edit', pass_context=True)
async def games_edit(ctx, game_abv: str, *, game_name: str):
"""Updates a game in the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Is there anything to update?
if not (is_game_abv(game_abv) or is_game_name(game_name)):
await bot.say('{0.mention}, either the abbreviation or the game name must exist to update.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE games SET `abv` = %s, `name = %s WHERE `abv` = %s OR `name` = %s"
cur = db.cursor()
cur.execute(sql, (game_abv, game_name, game_abv, game_name))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error updating the game in the games list. '.format(ctx.message.author)
+ str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully updated in the games list!'.format(ctx.message.author))
# COMMAND: !games remove
@games.command(name='remove', pass_context=True)
async def games_remove(ctx, *, game_or_abv: str):
"""Removes a game from the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Is there anything to update?
if not (is_game_abv(game_or_abv) or is_game_name(game_or_abv)):
await bot.say('{0.mention}, either the abbreviation or the game name must exist to update.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM games WHERE `abv` = %s OR `name` = %s"
cur = db.cursor()
cur.execute(sql, (game_or_abv, game_or_abv))
db.commit()
cur.close()
except Exception as e:
await bot.say("{0.mention}, there was an error deleting the game from the games list."
" ".format(ctx.message.author) + str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully deleted from the games list!'.format(ctx.message.author))
# COMMAND: !games list
@games.command(name='list', pass_context=True)
async def games_list(ctx):
"""Sends a message to the user with the current games and abbreviations for use in the roster."""
# Handle Database
try:
sql = "SELECT `abv`, `name` FROM games ORDER BY `name`"
cur = db.cursor()
cur.execute(sql)
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the list of games for you."
" I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
abvs = ''
names = ''
for row in result:
abvs += (row[0] + '\n')
names += (row[1] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="Abbreviation", value=abvs, inline=True)
embed.add_field(name="Game Name", value=names, inline=True)
# Send Table to User Privately
await bot.send_message(ctx.message.channel, embed=embed)
# ********************************************** #
# GROUPED COMMANDS : ROSTER ******************** #
# ********************************************** #
# COMMAND: !roster
@bot.group(pass_context=True)
async def roster(ctx):
"""Handles Roster Management."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid roster command passed. Must be *add*, *edit*, *list*, or *remove*.')
# COMMAND: !roster add
@roster.command(name='add', pass_context=True)
async def roster_add(ctx, game_abv: str, *, ign: str):
"""Adds username to roster.
Use a game abbreviation from the games list. Only one entry per game. Include all in-game names if necessary."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO roster (`discord_account`,`game_abv`,`game_account`) VALUES (%s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (username, game_abv, ign))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error adding your information to the roster.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your information was successfully added to the roster!'.format(ctx))
# COMMAND: !roster edit
@roster.command(name='edit', pass_context=True)
async def roster_edit(ctx, game_abv: str, *, ign: str):
"""Updates a roster entry for a specific game.
If either the Game Name or your in-Game Name has spaces, put them in quotes."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game'
' abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE roster SET `game_account` = %s WHERE `discord_account` = %s AND `game_abv` = %s"
cur = db.cursor()
cur.execute(sql, (ign, username, game_abv))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error updating your roster information.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your roster information was successfully updated!'.format(ctx))
# COMMAND: !roster remove
@roster.command(name='remove', pass_context=True)
async def roster_remove(ctx, game_abv: str, *, ign: str):
"""Removes a user's entries in the roster for the specified game."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable '
'game abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM roster WHERE `discord_account` = %s AND `game_abv` = %s AND `game_account` = %s"
cur = db.cursor()
cur.execute(sql, (username, game_abv, ign))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error deleting your roster information.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your roster information was successfully deleted!'.format(ctx))
# COMMAND: !roster list
@roster.command(name='list', pass_context=True)
async def roster_list(ctx, game_abv: str):
"""Sends a message to the user with the current roster for the specified game."""
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "SELECT `discord_account`, `game_account` FROM roster WHERE `game_abv` = %s ORDER BY `discord_account`"
cur = db.cursor()
cur.execute(sql, (game_abv,))
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the roster for you. "
"I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
accounts = ''
names = ''
for row in result:
accounts += (row[0] + '\n')
names += (row[1] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="Discord Account", value=accounts, inline=True)
embed.add_field(name="In-Game Name", value=names, inline=True)
# Send Table to Channel
await bot.send_message(ctx.message.channel, embed=embed)
# ********************************************** #
# GROUPED COMMANDS : RECRUIT ******************* #
# ********************************************** #
# COMMAND: !recruit
@bot.group(pass_context=True)
async def recruit(ctx):
"""Handles Recruitment Post and Invites Management."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid recruitment command passed. Must be *add*, *edit*, *invite*, *list*, or *remove*.')
# COMMAND: !recruit add
@recruit.command(name='add', pass_context=True)
async def recruit_add(ctx, game_abv: str, *, link: str):
"""Adds recruitment post link to the recruitment list. Use a game abbreviation from the games list."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say(
'{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO recruitment (`game`,`link`) VALUES (%s, %s)"
cur = db.cursor()
cur.execute(sql, (game_abv, link))
db.commit()
cur.close()
except Exception:
await bot.say(
'{0.message.author.mention}, there was an error adding your recruitment link to the list.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your information was successfully added to the recruitment '
'posts list!'.format(ctx))
# COMMAND: !recruit edit
@recruit.command(name='edit', pass_context=True)
async def recruit_edit(ctx, entry_id: int, *, link: str):
"""Updates a recruitment post entry with the specified entry ID."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE recruitment SET `link` = %s WHERE `entry_id` = %s"
cur = db.cursor()
cur.execute(sql, (link, entry_id))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error updating the specified '
'recruitment entry.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, the recruitment entry was successfully updated!'.format(ctx))
# COMMAND: !recruit remove
@recruit.command(name='remove', pass_context=True)
async def recruit_remove(ctx, entry_id: int):
"""Removes an entry for the recruitment posts list with the specified entry ID."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM recruitment WHERE `entry_id` = %s"
cur = db.cursor()
cur.execute(sql, (entry_id,))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error deleting the specified '
'recruitment entry.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, the recruitment entry was successfully deleted!'.format(ctx))
# COMMAND: !recruit list
@recruit.command(name='list', pass_context=True)
async def recruit_list(ctx):
"""Lists all recruitment post entries in the system."""
# Handle Database
try:
sql = "SELECT * FROM recruitment ORDER BY `game`"
cur = db.cursor()
cur.execute(sql)
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the recruitment list "
"for you. I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
entries = ''
game_abvs = ''
links = ''
for row in result:
entries += (str(row[0]) + '\n')
game_abvs += (row[1] + '\n')
links += (row[2] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="ID", value=entries, inline=True)
embed.add_field(name="Game", value=game_abvs, inline=True)
embed.add_field(name="Link", value=links, inline=True)
# Send Table to Channel
await bot.send_message(ctx.message.channel, embed=embed)
# COMMAND: !recruit invite
@recruit.command(name='invite')
async def recruit_invite(duration: int):
"""Provides an invite link to the Discord server. Set duration to 0 for permanent invite."""
# Default Duration 30 Minutes, Else Convert to Minutes
if duration is None:
duration = 1800
else:
duration *= 60
# WELCOME CHANNEL ID: 141622052133142529
welcome_channel = bot.get_channel('141622052133142529')
# Create the Invite
new_invite = await bot.create_invite(welcome_channel, max_age=duration)
# Send Message with Invite Link
await bot.say('Your newly generated invite link is: {0.url}'.format(new_invite))
# ********************************************** #
# MODERATOR COMMANDS *************************** #
# ********************************************** #
# COMMAND: !give_role
@bot.command(pass_context=True)
async def give_role(ctx, username: str, *, role_name: str):
"""Assigns a role to a user."""
# List of Roles Staff Can Add To.
allowed_roles = ['Europe',
'North America',
'Oceania',
'Overwatch',
'League of Legends',
'Co-op',
'Minna-chan',
'Squire',
'Knight',
'Zealot']
# Is the user allowed? (Must be Staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
if role_name not in allowed_roles:
await bot.say('{0.mention}, you may only assign users to allowed public roles or member ranks'
.format(ctx.message.author))
return
# Define role, then add role to member.
try:
role = discord.utils.get(ctx.message.server.roles, name=role_name)
user = discord.utils.get(ctx.message.server.members, name=username)
await bot.add_roles(user, role)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an granting the role to the user."
" ".format(ctx.message.author) + str(e))
return
# Success Message
await bot.say('{0.mention}, you have successfully added **{1}** to the group **{2}**'
'.'.format(ctx.message.author, username, role_name))
# COMMAND: !kick
@bot.command(name='kick', pass_context=True)
async def mod_kick(ctx, username: str, *, reason: str):
"""Kicks a user from the server."""
# User must be a staff member
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Add to DB and Post Message
try:
# Variables Needed
member = discord.utils.get(ctx.message.server.members, name=username)
staffer = ctx.message.author
# Handle Database
sql = "INSERT INTO mod_log (`action`,`user`, `user_id`, `staff`, `staff_id`, reason) " \
"VALUES ('kick', %s, %s, %s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (str(member), member.id, str(staffer), staffer.id, reason))
# Save Last Row ID
case_id = cur.lastrowid
# Insert Message
log_channel = bot.get_channel('303262467205890051')
msg_text = "**Case #{0}** | Kick :boot: \n**User**: {1} ({2}) " \
"\n**Moderator**: {3} ({4}) \n**Reason**: {5}"
# Add Message to Events Channel and Save Message ID
case_message = await bot.send_message(log_channel, msg_text.format(case_id, str(member), member.id, str(staffer), staffer.id, reason))
cur.execute("UPDATE mod_log SET `message_id` = %s WHERE `case_id` = %s", (case_message.id, case_id))
# Finish Database Stuff and Commit
db.commit()
cur.close()
# Kick the Member
await bot.kick(member)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error when kicking the user."
" ".format(ctx.message.author) + str(e))
await bot.say("{0.mention}, the user was successfully kicked. A log entry has been added.".format(ctx.message.author))
# ********************************************** #
# START THE BOT ******************************** #
# ********************************************** #
# Run the Bot
bot.run('token-here')
| gpl-3.0 | 2,852,675,339,850,913,300 | 33.866518 | 142 | 0.58759 | false |
nicain/dipde_dev | dipde/interfaces/zmq/__init__.py | 1 | 4371 | import time
import zmq
import threading
context = zmq.Context()
class PublishCallback(object):
def __init__(self, port, topic, message_callback):
self.port = port
self.topic = topic
self.message_callback = message_callback
self.socket = context.socket(zmq.PUB)
def __call__(self, obj):
message_to_send = list(self.message_callback(obj))
message_to_send.insert(0,"%s" % self.topic)
self.socket.send_multipart(map(str, message_to_send))
class PublishCallbackConnect(PublishCallback):
def __init__(self, port, topic, message_callback):
super(self.__class__, self).__init__(port, topic, message_callback)
self.socket.connect("tcp://localhost:%s" % self.port)
class CallbackSubscriber(object):
def __init__(self, port=None, receive_callback=None):
self.socket = context.socket(zmq.SUB)
if port is None:
self.port = self.socket.bind_to_random_port('tcp://*', min_port=6001, max_port=6004, max_tries=100)
else:
self.socket.bind("tcp://*:%s" % port)
self.port = port
self.socket.setsockopt(zmq.SUBSCRIBE, 'test')
if receive_callback is None:
def receive_callback(received_message):
print received_message
self.receive_callback = receive_callback
def run(self):
while True:
received_message_multipart = self.socket.recv_multipart()
topic = received_message_multipart[0]
received_message = received_message_multipart[1:]
self.receive_callback(received_message)
class CallbackSubscriberThread(threading.Thread):
def __init__(self, port=None):
super(self.__class__, self).__init__()
self.subscriber = CallbackSubscriber(port)
self.daemon = True
def run(self, port=None):
self.subscriber.run()
@property
def port(self):
return self.subscriber.port
class RequestConnection(object):
def __init__(self, port):
self.port = port
self.socket = context.socket(zmq.REQ)
self.socket.connect("tcp://localhost:%s" % port)
def __call__(self, *args):
if len(args) == 0:
self.socket.send(b'')
else:
self.socket.send_multipart(map(str,args))
message = self.socket.recv_multipart()
return float(message[0])
def shutdown(self):
self.socket.close()
assert self.socket.closed
class ReplyServerBind(object):
def __init__(self, reply_function, port=None):
self.socket = context.socket(zmq.REP)
if port is None:
self.port = self.socket.bind_to_random_port('tcp://*', min_port=6001, max_port=6004, max_tries=100)
else:
self.socket.bind("tcp://*:%s" % port)
self.port = port
self.reply_function = reply_function
def run(self):
while True:
message = self.socket.recv()
# print 'message:', message, type(message)
if message == 'SHUTDOWN':
break
# print 'message'
if message == '':
requested_args = tuple()
else:
requested_args = tuple([float(message)])
self.socket.send_multipart([b"%s" % self.reply_function(*requested_args)])
self.socket.send('DOWN')
self.socket.close()
class ReplyServerThread(threading.Thread):
def __init__(self, reply_function, port=None):
super(ReplyServerThread, self).__init__()
self._stop = threading.Event()
self.daemon = True
self.reply_function = reply_function
self.server = ReplyServerBind(self.reply_function, port=port)
def run(self, port=None):
self.server.run()
def shutdown(self):
shutdown_socket = context.socket(zmq.REQ)
shutdown_socket.connect("tcp://localhost:%s" % self.port)
shutdown_socket.send('SHUTDOWN')
message = shutdown_socket.recv()
assert message == 'DOWN'
self.stop()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
@property
def port(self):
return self.server.port
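# Hedged usage sketch (not part of the original module): wire the pub/sub and
# req/rep helpers above together in-process. The 'test' topic matches the
# hard-coded subscription in CallbackSubscriber; the doubling reply function
# and the 21.0 payload are illustrative assumptions only.
if __name__ == '__main__':
    subscriber = CallbackSubscriberThread()
    subscriber.start()
    publish = PublishCallbackConnect(subscriber.port, 'test', lambda obj: [obj])
    time.sleep(1)  # PUB/SUB slow-joiner: give the SUB socket time to connect
    publish('hello')  # subscriber thread prints ['hello']
    server = ReplyServerThread(lambda x: x * 2.0)
    server.start()
    request = RequestConnection(server.port)
    print request(21.0)  # expect 42.0 back from the reply server
    request.shutdown()
    server.shutdown()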
| gpl-3.0 | -8,901,712,531,665,347,000 | 27.94702 | 111 | 0.577442 | false |
yantrabuddhi/nativeclient | buildbot/buildbot_lib.py | 1 | 21952 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os.path
import shutil
import subprocess
import stat
import sys
import time
import traceback
ARCH_MAP = {
'32': {
'gyp_arch': 'ia32',
'scons_platform': 'x86-32',
},
'64': {
'gyp_arch': 'x64',
'scons_platform': 'x86-64',
},
'arm': {
'gyp_arch': 'arm',
'scons_platform': 'arm',
},
'mips32': {
'gyp_arch': 'mips32',
'scons_platform': 'mips32',
},
}
def RunningOnBuildbot():
return os.environ.get('BUILDBOT_SLAVE_TYPE') is not None
def GetHostPlatform():
sys_platform = sys.platform.lower()
if sys_platform.startswith('linux'):
return 'linux'
elif sys_platform in ('win', 'win32', 'windows', 'cygwin'):
return 'win'
elif sys_platform in ('darwin', 'mac'):
return 'mac'
else:
raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
"""
Set default values for the attributes needed by the SCons function, so that
SCons can be run without needing ParseStandardCommandLine
"""
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = 'opt'
context['default_scons_mode'] = ['opt-host', 'nacl']
context['default_scons_platform'] = ('x86-64' if platform == 'win'
else 'x86-32')
context['android'] = False
context['clang'] = False
context['asan'] = False
context['pnacl'] = False
context['use_glibc'] = False
context['use_breakpad_tools'] = False
context['max_jobs'] = 8
context['scons_args'] = []
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
# Poke around looking for MSVC. We should do something more principled in
# the future.
# The name of Program Files can differ, depending on the bittage of Windows.
program_files = r'c:\Program Files (x86)'
if not os.path.exists(program_files):
program_files = r'c:\Program Files'
if not os.path.exists(program_files):
raise Exception('Cannot find the Program Files directory!')
# The location of MSVC can differ depending on the version.
msvc_locs = [
('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'),
('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
]
for dirname, comntools_var, gyp_msvs_version in msvc_locs:
msvc = os.path.join(program_files, dirname)
context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
if os.path.exists(msvc):
break
else:
# The break statement did not execute.
raise Exception('Cannot find MSVC!')
# Put MSVC in the path.
vc = os.path.join(msvc, 'VC')
comntools = os.path.join(msvc, 'Common7', 'Tools')
perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
context.SetEnv('PATH', os.pathsep.join([
context.GetEnv('PATH'),
vc,
comntools,
perf]))
# SCons needs this variable to find vsvars.bat.
# The end slash is needed because the batch files expect it.
context.SetEnv(comntools_var, comntools + '\\')
# This environment variable will tell SCons to print debug info while it searches
# for MSVC.
context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
# Needed for finding devenv.
context['msvc'] = msvc
SetupGyp(context, [])
def SetupGyp(context, extra_vars=[]):
if RunningOnBuildbot():
goma_opts = [
'use_goma=1',
'gomadir=/b/build/goma',
]
else:
goma_opts = []
context.SetEnv('GYP_DEFINES', ' '.join(
context['gyp_vars'] + goma_opts + extra_vars))
def SetupLinuxEnvironment(context):
if context['arch'] == 'mips32':
# Ensure the trusted mips toolchain is installed.
cmd = ['build/package_version/package_version.py', '--packages',
'linux_x86/mips_trusted', 'sync', '-x']
Command(context, cmd)
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupMacEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupAndroidEnvironment(context):
SetupGyp(context, ['OS=android', 'target_arch='+context['gyp_arch']])
context.SetEnv('GYP_CROSSCOMPILE', '1')
def ParseStandardCommandLine(context):
"""
The standard buildbot scripts require 3 arguments to run. The first
argument (dbg/opt) controls if the build is a debug or a release build. The
second argument (32/64) controls the machine architecture being targeted.
The third argument (newlib/glibc) controls which c library we're using for
the nexes. Different buildbots may have different sets of arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
action='store_true', help='Do not execute any commands.')
parser.add_option('--inside-toolchain', dest='inside_toolchain',
default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
action='store_true', help='Inside toolchain build.')
parser.add_option('--android', dest='android', default=False,
action='store_true', help='Build for Android.')
parser.add_option('--clang', dest='clang', default=False,
action='store_true', help='Build trusted code with Clang.')
parser.add_option('--coverage', dest='coverage', default=False,
action='store_true',
help='Build and test for code coverage.')
parser.add_option('--validator', dest='validator', default=False,
action='store_true',
help='Only run validator regression test')
parser.add_option('--asan', dest='asan', default=False,
action='store_true', help='Build trusted code with ASan.')
parser.add_option('--scons-args', dest='scons_args', default =[],
action='append', help='Extra scons arguments.')
parser.add_option('--step-suffix', metavar='SUFFIX', default='',
help='Append SUFFIX to buildbot step names.')
parser.add_option('--no-gyp', dest='no_gyp', default=False,
action='store_true', help='Do not run the gyp build')
parser.add_option('--no-goma', dest='no_goma', default=False,
action='store_true', help='Do not run with goma')
parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
default=False, action='store_true',
help='Use breakpad tools for testing')
parser.add_option('--skip-build', dest='skip_build', default=False,
action='store_true',
help='Skip building steps in buildbot_pnacl')
parser.add_option('--skip-run', dest='skip_run', default=False,
action='store_true',
help='Skip test-running steps in buildbot_pnacl')
options, args = parser.parse_args()
if len(args) != 3:
parser.error('Expected 3 arguments: mode arch toolchain')
# script + 3 args == 4
mode, arch, toolchain = args
if mode not in ('dbg', 'opt', 'coverage'):
parser.error('Invalid mode %r' % mode)
if arch not in ARCH_MAP:
parser.error('Invalid arch %r' % arch)
if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'):
parser.error('Invalid toolchain %r' % toolchain)
# TODO(ncbray) allow a command-line override
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = mode
context['arch'] = arch
context['android'] = options.android
# ASan is Clang, so set the flag to simplify other checks.
context['clang'] = options.clang or options.asan
context['validator'] = options.validator
context['asan'] = options.asan
# TODO(ncbray) turn derived values into methods.
context['gyp_mode'] = {
'opt': 'Release',
'dbg': 'Debug',
'coverage': 'Debug'}[mode]
context['gn_is_debug'] = {
'opt': 'false',
'dbg': 'true',
'coverage': 'true'}[mode]
context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
context['gyp_vars'] = []
if context['clang']:
context['gyp_vars'].append('clang=1')
if context['asan']:
context['gyp_vars'].append('asan=1')
context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
context['default_scons_mode'] = ['nacl']
# Only Linux can build trusted code on ARM.
# TODO(mcgrathr): clean this up somehow
if arch != 'arm' or platform == 'linux':
context['default_scons_mode'] += [mode + '-host']
context['use_glibc'] = toolchain == 'glibc'
context['pnacl'] = toolchain == 'pnacl'
context['nacl_clang'] = toolchain == 'nacl_clang'
context['max_jobs'] = 8
context['dry_run'] = options.dry_run
context['inside_toolchain'] = options.inside_toolchain
context['step_suffix'] = options.step_suffix
context['no_gyp'] = options.no_gyp
context['no_goma'] = options.no_goma
context['coverage'] = options.coverage
context['use_breakpad_tools'] = options.use_breakpad_tools
context['scons_args'] = options.scons_args
context['skip_build'] = options.skip_build
context['skip_run'] = options.skip_run
# Don't run gyp on coverage builds.
if context['coverage']:
context['no_gyp'] = True
for key, value in sorted(context.config.items()):
print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
"""
Create a directory if it does not already exist.
Does not mask failures, but there really shouldn't be any.
"""
if not os.path.exists(path):
os.makedirs(path)
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
try:
RemovePath(path)
except Exception:
print 'Failed to remove %s' % path
else:
print 'Skipping %s' % path
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
# directories fail. We currently do not have a complete understanding of why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
# Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if GetHostPlatform() == 'win':
count = 0
while True:
try:
op(*args)
break
except Exception:
print "FAILED: %s %s" % (op.__name__, repr(args))
count += 1
if count < 5:
print "RETRY: %s %s" % (op.__name__, repr(args))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def PermissionsFixOnError(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def _RemoveDirectory(path):
print 'Removing %s' % path
if os.path.exists(path):
shutil.rmtree(path, onerror=PermissionsFixOnError)
print ' Succeeded.'
else:
print ' Path does not exist, nothing to do.'
def RemoveDirectory(path):
"""
Remove a directory if it exists.
Does not mask failures, although it does retry a few times on Windows.
"""
Retry(_RemoveDirectory, path)
def RemovePath(path):
"""Remove a path, file or directory."""
if os.path.isdir(path):
RemoveDirectory(path)
else:
if os.path.isfile(path) and not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
os.remove(path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
# CWD
if os.path.exists(name):
return True
# Paths with directories are not resolved using the PATH variable.
if os.path.dirname(name):
return False
# In path
for path in paths.split(os.pathsep):
full = os.path.join(path, name)
if os.path.exists(full):
return True
return False
def RemoveGypBuildDirectories():
# Remove all directories on all platforms. Overkill, but it allows for
# straight-line code.
# Windows
RemoveDirectory('build/Debug')
RemoveDirectory('build/Release')
RemoveDirectory('build/Debug-Win32')
RemoveDirectory('build/Release-Win32')
RemoveDirectory('build/Debug-x64')
RemoveDirectory('build/Release-x64')
# Linux and Mac
RemoveDirectory('../xcodebuild')
RemoveDirectory('../out')
RemoveDirectory('src/third_party/nacl_sdk/arm-newlib')
def RemoveSconsBuildDirectories():
RemoveDirectory('scons-out')
RemoveDirectory('breakpad-out')
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
print 'Running command: %s' % ' '.join(cmd)
# Python's subprocess has a quirk. A subprocess can execute with an
# arbitrary, user-defined environment. The first argument of the command,
# however, is located using the PATH variable of the Python script that is
# launching the subprocess. Modifying the PATH in the environment passed to
# the subprocess does not affect Python's search for the first argument of
  # the command (the executable file). This is a little counterintuitive,
# so we're forcing the search to use the same PATH variable as is seen by
# the subprocess.
env = context.MakeCommandEnv()
script_path = os.environ['PATH']
os.environ['PATH'] = env['PATH']
try:
if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
# Make sure that print statements before the subprocess call have been
# flushed, otherwise the output of the subprocess call may appear before
# the print statements.
sys.stdout.flush()
if context['dry_run']:
retcode = 0
else:
retcode = subprocess.call(cmd, cwd=cwd, env=env)
else:
# Provide a nicer failure message.
# If subprocess cannot find the executable, it will throw a cryptic
# exception.
print 'Executable %r cannot be found.' % cmd[0]
retcode = 1
finally:
os.environ['PATH'] = script_path
print 'Command return code: %d' % retcode
if retcode != 0:
raise StepFailed()
return retcode
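# Usage sketch (added for illustration; not part of the original script):
# run a command through Command using a BuildContext (defined further down)
# in dry-run mode, which exercises the PATH handling described above without
# spawning a subprocess.
def ExampleDryRunCommand():
  ctx = BuildContext()
  ctx['dry_run'] = True
  return Command(ctx, ['echo', 'hello'])  # prints the command and returns 0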
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
args=(), cwd=None):
python = sys.executable
if mode is None: mode = context['default_scons_mode']
if platform is None: platform = context['default_scons_platform']
if parallel:
jobs = context['max_jobs']
else:
jobs = 1
cmd = []
if browser_test and context.Linux():
# Although we could use the "browser_headless=1" Scons option, it runs
# xvfb-run once per Chromium invocation. This is good for isolating
# the tests, but xvfb-run has a stupid fixed-period sleep, which would
# slow down the tests unnecessarily.
cmd.extend(['xvfb-run', '--auto-servernum'])
cmd.extend([
python, 'scons.py',
'--verbose',
'-k',
'-j%d' % jobs,
'--mode='+','.join(mode),
'platform='+platform,
])
cmd.extend(context['scons_args'])
if context['clang']: cmd.append('--clang')
if context['asan']: cmd.append('--asan')
if context['use_glibc']: cmd.append('--nacl_glibc')
if context['pnacl']: cmd.append('bitcode=1')
if context['nacl_clang']: cmd.append('nacl_clang=1')
if context['use_breakpad_tools']:
cmd.append('breakpad_tools_dir=breakpad-out')
if context['android']:
cmd.append('android=1')
  # Append user-specified arguments.
cmd.extend(args)
Command(context, cmd, cwd)
class StepFailed(Exception):
"""
Thrown when the step has failed.
"""
class StopBuild(Exception):
"""
Thrown when the entire build should stop. This does not indicate a failure,
  in and of itself.
"""
class Step(object):
"""
This class is used in conjunction with a Python "with" statement to ensure
  that the preamble and postamble of each build step get printed and failures
get logged. This class also ensures that exceptions thrown inside a "with"
statement don't take down the entire build.
"""
def __init__(self, name, status, halt_on_fail=True):
self.status = status
if 'step_suffix' in status.context:
suffix = status.context['step_suffix']
else:
suffix = ''
self.name = name + suffix
self.halt_on_fail = halt_on_fail
self.step_failed = False
# Called on entry to a 'with' block.
def __enter__(self):
sys.stdout.flush()
print
print '@@@BUILD_STEP %s@@@' % self.name
self.status.ReportBegin(self.name)
# The method is called on exit from a 'with' block - even for non-local
# control flow, i.e. exceptions, breaks, continues, returns, etc.
# If an exception is thrown inside a block wrapped with a 'with' statement,
# the __exit__ handler can suppress the exception by returning True. This is
# used to isolate each step in the build - if an exception occurs in a given
# step, the step is treated as a failure. This allows the postamble for each
  # step to be printed and also allows the build to continue if the failure of
# a given step doesn't halt the build.
def __exit__(self, type, exception, trace):
sys.stdout.flush()
if exception is None:
# If exception is None, no exception occurred.
step_failed = False
elif isinstance(exception, StepFailed):
step_failed = True
print
print 'Halting build step because of failure.'
print
else:
step_failed = True
print
print 'The build step threw an exception...'
print
traceback.print_exception(type, exception, trace, file=sys.stdout)
print
if step_failed:
self.status.ReportFail(self.name)
print '@@@STEP_FAILURE@@@'
if self.halt_on_fail:
print
print 'Entire build halted because %s failed.' % self.name
sys.stdout.flush()
raise StopBuild()
else:
self.status.ReportPass(self.name)
sys.stdout.flush()
# Suppress any exception that occurred.
return True
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
"""
Keeps track of the overall status of the build.
"""
def __init__(self, context):
self.context = context
self.ever_failed = False
self.steps = []
def ReportBegin(self, name):
pass
def ReportPass(self, name):
self.steps.append((name, 'passed'))
def ReportFail(self, name):
self.steps.append((name, 'failed'))
self.ever_failed = True
# Handy info when this script is run outside of the buildbot.
def DisplayBuildStatus(self):
print
for step, status in self.steps:
print '%-40s[%s]' % (step, status)
print
if self.ever_failed:
print 'Build failed.'
else:
print 'Build succeeded.'
def ReturnValue(self):
return int(self.ever_failed)
class BuildContext(object):
"""
Encapsulates the information needed for running a build command. This
includes environment variables and default arguments for SCons invocations.
"""
# Only allow these attributes on objects of this type.
__slots__ = ['status', 'global_env', 'config']
def __init__(self):
# The contents of global_env override os.environ for any commands run via
# self.Command(...)
self.global_env = {}
# PATH is a special case. See: Command.
self.global_env['PATH'] = os.environ.get('PATH', '')
self.config = {}
self['dry_run'] = False
# Emulate dictionary subscripting.
def __getitem__(self, key):
return self.config[key]
# Emulate dictionary subscripting.
def __setitem__(self, key, value):
self.config[key] = value
# Emulate dictionary membership test
def __contains__(self, key):
return key in self.config
def Windows(self):
return self.config['platform'] == 'win'
def Linux(self):
return self.config['platform'] == 'linux'
def Mac(self):
return self.config['platform'] == 'mac'
def GetEnv(self, name, default=None):
return self.global_env.get(name, default)
def SetEnv(self, name, value):
self.global_env[name] = str(value)
def MakeCommandEnv(self):
# The external environment is not sanitized.
e = dict(os.environ)
# Arbitrary variables can be overridden.
e.update(self.global_env)
return e
def RunBuild(script, status):
try:
script(status, status.context)
except StopBuild:
pass
# Emit a summary step for three reasons:
# - The annotator will attribute non-zero exit status to the last build step.
# This can misattribute failures to the last build step.
# - runtest.py wraps the builds to scrape perf data. It emits an annotator
# tag on exit which misattributes perf results to the last build step.
# - Provide a label step in which to show summary result.
# Otherwise these go back to the preamble.
with Step('summary', status):
if status.ever_failed:
print 'There were failed stages.'
else:
print 'Success.'
# Display a summary of the build.
status.DisplayBuildStatus()
sys.exit(status.ReturnValue())
| bsd-3-clause | -6,223,911,041,280,375,000 | 30.722543 | 80 | 0.654246 | false |
tjcsl/director | web3/apps/sites/migrations/0001_initial.py | 1 | 1297 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-05 23:20
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0002_auto_20161105_2046'),
]
operations = [
migrations.CreateModel(
name='Website',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(1000)])),
('name', models.CharField(max_length=32, unique=True)),
('category', models.CharField(choices=[('legacy', 'legacy'), ('static', 'static'), ('php', 'php'), ('dynamic', 'dynamic')], max_length=16)),
('purpose', models.CharField(choices=[('user', 'user'), ('activity', 'activity')], max_length=16)),
('domain', models.TextField()),
('description', models.TextField()),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.User')),
],
),
]
| mit | -8,739,404,138,227,232,000 | 39.53125 | 156 | 0.596762 | false |
TAMU-CPT/galaxy-tools | tools/gff3/gff3_filter.py | 1 | 1553 | #!/usr/bin/env python
import sys
import logging
import argparse
from cpt_gffParser import gffParse, gffWrite
from gff3 import feature_lambda, feature_test_qual_value
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def gff_filter(gff3, id_list=None, id="", attribute_field="ID", subfeatures=True):
attribute_field = attribute_field.split("__cn__")
if id_list:
filter_strings = [line.strip() for line in id_list]
else:
filter_strings = [x.strip() for x in id.split("__cn__")]
for rec in gffParse(gff3):
rec.features = feature_lambda(
rec.features,
feature_test_qual_value,
{"qualifier": attribute_field, "attribute_list": filter_strings},
subfeatures=subfeatures,
)
rec.annotations = {}
gffWrite([rec], sys.stdout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="extract features from a GFF3 file based on ID/qualifiers"
)
parser.add_argument("gff3", type=argparse.FileType("r"), help="GFF3 annotations")
parser.add_argument("--id_list", type=argparse.FileType("r"))
parser.add_argument("--id", type=str)
parser.add_argument(
"--attribute_field",
type=str,
help="Column 9 Field to search against",
default="ID",
)
parser.add_argument(
"--subfeatures",
action="store_true",
help="Retain subfeature tree of matched features",
)
args = parser.parse_args()
gff_filter(**vars(args))
| gpl-3.0 | 2,550,448,760,510,067,700 | 31.354167 | 85 | 0.627817 | false |
greggian/TapdIn | django/contrib/localflavor/us/models.py | 1 | 1132 | from django.conf import settings
from django.db.models.fields import Field
class USStateField(Field):
def get_internal_type(self):
return "USStateField"
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
return 'CHAR(2)'
else:
return 'varchar(2)'
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USStateSelect
defaults = {'widget': USStateSelect}
defaults.update(kwargs)
return super(USStateField, self).formfield(**defaults)
class PhoneNumberField(Field):
def get_internal_type(self):
return "PhoneNumberField"
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
return 'VARCHAR2(20)'
else:
return 'varchar(20)'
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USPhoneNumberField
defaults = {'form_class': USPhoneNumberField}
defaults.update(kwargs)
return super(PhoneNumberField, self).formfield(**defaults)
| apache-2.0 | 2,579,539,055,631,886,000 | 30.342857 | 74 | 0.614841 | false |
seraphlnWu/in_trip | in_trip/scripts/change_data_from_hbase_to_pg.py | 1 | 1620 | #coding=utf-8
import time
import cPickle
from in_trip.store_data.views import pg_db,conn
import logging
logger = logging.getLogger('parser')
def creat_table():
sql_str = '''
create table "tmp_hbase_to_pg"(
data text,
timestamp float(24)
)
'''
pg_db.execute(sql_str)
conn.commit()
def insert_data(o_dict, default_value):
    data = cPickle.dumps({
'o_dict' : o_dict,
'default_value' : default_value
})
sql_str = '''
insert into tmp_hbase_to_pg
(data,timestamp)
values
(%s,%s);
'''
try:
pg_db.execute(sql_str,(data,time.time()))
conn.commit()
except Exception as e:
conn.rollback()
logger.error('insert to pg error: %s', e)
def get_data_all():
sql_str = '''
select * from tmp_hbase_to_pg;
'''
pg_db.execute(sql_str)
print pg_db.fetchall()
def get_data(offset,limit=1000):
sql_str = '''
select * from tmp_hbase_to_pg limit(%s) offset(%s);
'''
pg_db.execute(sql_str,(limit,offset))
return pg_db.fetchall()
def insert_into_hbase():
from in_trip.store_data.hbase.run import insert_data as hbase_insert
offset = 0
limit = 1000
while True:
res_list = get_data(offset,limit)
if not res_list:
break
offset = offset + limit
for item in res_list:
tmp_data = cPickle.loads(item[0])
hbase_insert(tmp_data['o_dict'],tmp_data['default_value'])
return True
if __name__ == "__main__":
creat_table()
print "success!"
| mit | 5,948,230,377,055,756,000 | 22.478261 | 72 | 0.557407 | false |
fallen/artiq | artiq/frontend/artiq_run.py | 1 | 4103 | #!/usr/bin/env python3
# Copyright (C) 2014, 2015 M-Labs Limited
# Copyright (C) 2014, 2015 Robert Jordens <jordens@gmail.com>
import argparse
import sys
import time
from operator import itemgetter
from itertools import chain
import logging
import h5py
from artiq.language.environment import EnvExperiment
from artiq.protocols.file_db import FlatFileDB
from artiq.master.worker_db import DeviceManager, ResultDB
from artiq.tools import *
logger = logging.getLogger(__name__)
class ELFRunner(EnvExperiment):
def build(self):
self.attr_device("core")
self.attr_argument("file")
def run(self):
with open(self.file, "rb") as f:
self.core.comm.load(f.read())
self.core.comm.run("run")
self.core.comm.serve(dict(), dict())
class SimpleParamLogger:
def set(self, timestamp, name, value):
logger.info("Parameter change: {} = {}".format(name, value))
class DummyScheduler:
def __init__(self):
self.next_rid = 0
self.pipeline_name = "main"
self.priority = 0
self.expid = None
def submit(self, pipeline_name, expid, priority, due_date, flush):
rid = self.next_rid
self.next_rid += 1
logger.info("Submitting: %s, RID=%s", expid, rid)
return rid
def delete(self, rid):
logger.info("Deleting RID %s", rid)
def pause(self):
pass
def get_argparser(with_file=True):
parser = argparse.ArgumentParser(
description="Local experiment running tool")
verbosity_args(parser)
parser.add_argument("-d", "--ddb", default="ddb.pyon",
help="device database file")
parser.add_argument("-p", "--pdb", default="pdb.pyon",
help="parameter database file")
parser.add_argument("-e", "--experiment", default=None,
help="experiment to run")
parser.add_argument("-o", "--hdf5", default=None,
help="write results to specified HDF5 file"
" (default: print them)")
if with_file:
parser.add_argument("file",
help="file containing the experiment to run")
parser.add_argument("arguments", nargs="*",
help="run arguments")
return parser
def _build_experiment(dmgr, pdb, rdb, args):
if hasattr(args, "file"):
if args.file.endswith(".elf"):
if args.arguments:
raise ValueError("arguments not supported for ELF kernels")
if args.experiment:
raise ValueError("experiment-by-name not supported "
"for ELF kernels")
return ELFRunner(dmgr, pdb, rdb, file=args.file)
else:
module = file_import(args.file)
file = args.file
else:
module = sys.modules["__main__"]
file = getattr(module, "__file__")
exp = get_experiment(module, args.experiment)
arguments = parse_arguments(args.arguments)
expid = {
"file": file,
"experiment": args.experiment,
"arguments": arguments
}
dmgr.virtual_devices["scheduler"].expid = expid
return exp(dmgr, pdb, rdb, **arguments)
def run(with_file=False):
args = get_argparser(with_file).parse_args()
init_logger(args)
dmgr = DeviceManager(FlatFileDB(args.ddb),
virtual_devices={"scheduler": DummyScheduler()})
pdb = FlatFileDB(args.pdb)
pdb.hooks.append(SimpleParamLogger())
rdb = ResultDB()
try:
exp_inst = _build_experiment(dmgr, pdb, rdb, args)
exp_inst.prepare()
exp_inst.run()
exp_inst.analyze()
finally:
dmgr.close_devices()
if args.hdf5 is not None:
with h5py.File(args.hdf5, "w") as f:
rdb.write_hdf5(f)
elif rdb.rt.read or rdb.nrt:
r = chain(rdb.rt.read.items(), rdb.nrt.items())
for k, v in sorted(r, key=itemgetter(0)):
print("{}: {}".format(k, v))
def main():
return run(with_file=True)
if __name__ == "__main__":
main()
| gpl-3.0 | -3,275,687,307,934,452,700 | 27.894366 | 75 | 0.58835 | false |
vntarasov/openpilot | selfdrive/debug/get_fingerprint.py | 1 | 1030 | #!/usr/bin/env python3
# simple script to get a vehicle fingerprint.
# Instructions:
# - connect to a Panda
# - run selfdrive/boardd/boardd
# - launch this script
# - turn on the car in STOCK MODE (set giraffe switches properly).
# Note: it's very important that the car is in stock mode, in order to collect a complete fingerprint
# - since some messages are published at low frequency, keep this script running for at least 30s,
# until all messages are received at least once
import cereal.messaging as messaging
logcan = messaging.sub_sock('can')
msgs = {}
while True:
lc = messaging.recv_sock(logcan, True)
if lc is None:
continue
for c in lc.can:
    # also read msgs sent by EON on CAN bus 0x80 and filter out the
    # addrs with more than 11 bits
if c.src in [0, 2] and c.address < 0x800:
msgs[c.address] = len(c.dat)
fingerprint = ', '.join("%d: %d" % v for v in sorted(msgs.items()))
print("number of messages {0}:".format(len(msgs)))
print("fingerprint {0}".format(fingerprint))
| mit | -3,785,566,846,449,061,400 | 31.1875 | 103 | 0.695146 | false |
vcoin-project/v | qa/rpc-tests/test_framework/bignum.py | 1 | 1991 | #
#
# bignum.py
#
# This file is copied from python-vcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# vcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
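# Usage sketch (added for illustration; not part of the original module):
# round-trip a signed integer through the big-endian MPI encoding and the
# little-endian vch encoding defined above.
def _example_roundtrip(value=-123456789):
    assert mpi2bn(bn2mpi(value)) == value # bn2mpi/mpi2bn invert each other
    assert vch2bn(bn2vch(value)) == value # likewise for the vch variant
    return bn2vch(value)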
| mit | -4,014,981,737,356,212,000 | 18.519608 | 82 | 0.52436 | false |
ultimanet/nifty | rg/powerspectrum.py | 1 | 26583 | ## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
## TODO: cythonize
from __future__ import division
import numpy as np
def draw_vector_nd(axes,dgrid,ps,symtype=0,fourier=False,zerocentered=False,kpack=None):
"""
        Draws an n-dimensional field on a regular grid from a given power
spectrum. The grid parameters need to be specified, together with a
couple of global options explained below. The dimensionality of the
field is determined automatically.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
ps : ndarray
The power spectrum as a function of Fourier modes.
symtype : int {0,1,2} : *optional*
Whether the output should be real valued (0), complex-hermitian (1)
or complex without symmetry (2). (default=0)
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
zerocentered : bool : *optional*
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
Returns
-------
field : ndarray
The drawn random field.
"""
if(kpack is None):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier))
klength = nklength(kdict)
else:
kdict = kpack[1][np.fft.ifftshift(kpack[0],axes=shiftaxes(zerocentered,st_to_zero_mode=False))]
klength = kpack[1]
#output is in position space
if(not fourier):
#output is real-valued
if(symtype==0):
vector = drawherm(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.real(np.fft.fftshift(np.fft.ifftn(vector),axes=shiftaxes(zerocentered)))
else:
return np.real(np.fft.ifftn(vector))
#output is complex with hermitian symmetry
elif(symtype==1):
vector = drawwild(klength,kdict,ps,real_corr=2)
if(np.any(zerocentered==True)):
return np.fft.fftshift(np.fft.ifftn(np.real(vector)),axes=shiftaxes(zerocentered))
else:
return np.fft.ifftn(np.real(vector))
#output is complex without symmetry
else:
vector = drawwild(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(np.fft.ifftn(vector),axes=shiftaxes(zerocentered))
else:
return np.fft.ifftn(vector)
#output is in fourier space
else:
#output is real-valued
if(symtype==0):
vector = drawwild(klength,kdict,ps,real_corr=2)
if np.any(zerocentered == True):
return np.real(np.fft.fftshift(vector,axes=shiftaxes(zerocentered)))
else:
return np.real(vector)
#output is complex with hermitian symmetry
elif(symtype==1):
vector = drawherm(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(vector,axes=shiftaxes(zerocentered))
else:
return vector
#output is complex without symmetry
else:
vector = drawwild(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(vector,axes=shiftaxes(zerocentered))
else:
return vector
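## Usage sketch (added for illustration; not part of the original module):
## draw a real-valued random field on a 16x16 grid from an arbitrary falling
## power spectrum. nkdict_fast and nklength are defined further below.
def _example_draw_field():
    axes = np.array([16,16])
    dgrid = np.array([1/16.,1/16.])
    klength = nklength(nkdict_fast(axes,dgrid,fourier=False))
    ps = 1/(1+klength)**3 ## arbitrary power-law spectrum for the demo
    return draw_vector_nd(axes,dgrid,ps)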
#def calc_ps(field,axes,dgrid,zerocentered=False,fourier=False):
#
# """
# Calculates the power spectrum of a given field assuming that the field
#        is statistically homogeneous and isotropic.
#
# Parameters
# ----------
# field : ndarray
# The input field from which the power spectrum should be determined.
#
# axes : ndarray
# An array with the length of each axis.
#
# dgrid : ndarray
# An array with the pixel length of each axis.
#
# zerocentered : bool : *optional*
# Whether the output array should be zerocentered, i.e. starting with
# negative Fourier modes going over the zero mode to positive modes,
# or not zerocentered, where zero, positive and negative modes are
#            simply ordered consecutively.
#
# fourier : bool : *optional*
# Whether the output should be in Fourier space or not
# (default=False).
#
# """
#
# ## field absolutes
# if(not fourier):
# foufield = np.fft.fftshift(np.fft.fftn(field))
# elif(np.any(zerocentered==False)):
# foufield = np.fft.fftshift(field, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
# else:
# foufield = field
# fieldabs = np.abs(foufield)**2
#
# kdict = nkdict_fast(axes,dgrid,fourier)
# klength = nklength(kdict)
#
# ## power spectrum
# ps = np.zeros(klength.size)
# rho = np.zeros(klength.size)
# for ii in np.ndindex(kdict.shape):
# position = np.searchsorted(klength,kdict[ii])
# rho[position] += 1
# ps[position] += fieldabs[ii]
# ps = np.divide(ps,rho)
# return ps
def calc_ps_fast(field,axes,dgrid,zerocentered=False,fourier=False,pindex=None,kindex=None,rho=None):
"""
        Calculates the power spectrum of a given field faster, assuming that the
        field is statistically homogeneous and isotropic.
Parameters
----------
field : ndarray
The input field from which the power spectrum should be determined.
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool : *optional*
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
pindex : ndarray
Index of the Fourier grid points in a numpy.ndarray ordered
following the zerocentered flag (default=None).
kindex : ndarray
Array of all k-vector lengths (default=None).
rho : ndarray
Degeneracy of the Fourier grid, indicating how many k-vectors in
Fourier space have the same length (default=None).
"""
## field absolutes
if(not fourier):
foufield = np.fft.fftshift(np.fft.fftn(field))
elif(np.any(zerocentered==False)):
foufield = np.fft.fftshift(field, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
foufield = field
fieldabs = np.abs(foufield)**2
if(rho is None):
if(pindex is None):
## kdict
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
else:
klength = kindex
## power spectrum
ps = np.zeros(klength.size)
rho = np.zeros(klength.size)
for ii in np.ndindex(kdict.shape):
position = np.searchsorted(klength,kdict[ii])
ps[position] += fieldabs[ii]
rho[position] += 1
else:
## zerocenter pindex
if(np.any(zerocentered==False)):
pindex = np.fft.fftshift(pindex, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
## power spectrum
ps = np.zeros(np.max(pindex)+1)
rho = np.zeros(ps.size)
for ii in np.ndindex(pindex.shape):
ps[pindex[ii]] += fieldabs[ii]
rho[pindex[ii]] += 1
elif(pindex is None):
## kdict
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
else:
klength = kindex
## power spectrum
ps = np.zeros(klength.size)
for ii in np.ndindex(kdict.shape):
position = np.searchsorted(klength,kdict[ii])
ps[position] += fieldabs[ii]
else:
## zerocenter pindex
if(np.any(zerocentered==False)):
pindex = np.fft.fftshift(pindex, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
## power spectrum
ps = np.zeros(rho.size)
for ii in np.ndindex(pindex.shape):
ps[pindex[ii]] += fieldabs[ii]
ps = np.divide(ps,rho)
return ps
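## Usage sketch (added for illustration; not part of the original module):
## measure the power spectrum of a white-noise test field, reusing the
## precomputed index arrays from get_power_indices (defined below).
def _example_measure_ps():
    axes = np.array([16,16])
    dgrid = np.array([1/16.,1/16.])
    field = np.random.normal(size=tuple(axes)) ## white-noise test field
    pindex,kindex,rho = get_power_indices(axes,dgrid,np.array([False,False]),fourier=False)
    return calc_ps_fast(field,axes,dgrid,zerocentered=np.array([False,False]),fourier=False,pindex=pindex,kindex=kindex,rho=rho)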
def get_power_index(axes,dgrid,zerocentered,irred=False,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
irred : bool : *optional*
If True, the function returns an array of all k-vector lengths and
their degeneracy factors. If False, just the power index array is
returned.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index or {klength, rho} : scalar or list
Returns either an array of all k-vector lengths and
their degeneracy factors or just the power index array
depending on the flag irred.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
if(irred):
rho = np.zeros(klength.shape,dtype=np.int)
for ii in np.ndindex(kdict.shape):
rho[np.searchsorted(klength,kdict[ii])] += 1
return klength,rho
else:
ind = np.empty(axes,dtype=np.int)
for ii in np.ndindex(kdict.shape):
ind[ii] = np.searchsorted(klength,kdict[ii])
return ind
def get_power_indices(axes,dgrid,zerocentered,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index, klength, rho : ndarrays
Returns the power index array, an array of all k-vector lengths and
their degeneracy factors.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
ind = np.empty(axes,dtype=np.int)
rho = np.zeros(klength.shape,dtype=np.int)
for ii in np.ndindex(kdict.shape):
ind[ii] = np.searchsorted(klength,kdict[ii])
rho[ind[ii]] += 1
return ind,klength,rho
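## Usage sketch (added for illustration; not part of the original module):
## the returned arrays are mutually consistent -- rho[i] counts the grid
## points whose k-vector has the length kindex[i].
def _example_check_indices():
    axes = np.array([8,8])
    dgrid = np.array([0.125,0.125])
    ind,kindex,rho = get_power_indices(axes,dgrid,np.array([True,True]))
    assert rho.sum()==np.prod(axes)
    assert kindex.size==rho.size
    return ind.shape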
def get_power_indices2(axes,dgrid,zerocentered,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index, klength, rho : ndarrays
Returns the power index array, an array of all k-vector lengths and
their degeneracy factors.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast2(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast2(axes,dgrid,fourier)
klength,rho,ind = nkdict_to_indices(kdict)
return ind,klength,rho
def nkdict_to_indices(kdict):
kindex,pindex = np.unique(kdict,return_inverse=True)
pindex = pindex.reshape(kdict.shape)
rho = pindex.flatten()
rho.sort()
rho = np.unique(rho,return_index=True,return_inverse=False)[1]
rho = np.append(rho[1:]-rho[:-1],[np.prod(pindex.shape)-rho[-1]])
return kindex,rho,pindex
def bin_power_indices(pindex,kindex,rho,log=False,nbin=None,binbounds=None):
"""
Returns the (re)binned power indices associated with the Fourier grid.
Parameters
----------
pindex : ndarray
Index of the Fourier grid points in a numpy.ndarray ordered
following the zerocentered flag (default=None).
kindex : ndarray
Array of all k-vector lengths (default=None).
rho : ndarray
Degeneracy of the Fourier grid, indicating how many k-vectors in
Fourier space have the same length (default=None).
log : bool
Flag specifying if the binning is performed on logarithmic scale
(default: False).
nbin : integer
Number of used bins (default: None).
binbounds : {list, array}
Array-like inner boundaries of the used bins (default: None).
Returns
-------
pindex, kindex, rho : ndarrays
The (re)binned power indices.
"""
## boundaries
if(binbounds is not None):
binbounds = np.sort(binbounds)
## equal binning
else:
if(log is None):
log = False
if(log):
k = np.r_[0,np.log(kindex[1:])]
else:
k = kindex
dk = np.max(k[2:]-k[1:-1]) ## minimal dk
if(nbin is None):
nbin = int((k[-1]-0.5*(k[2]+k[1]))/dk-0.5) ## maximal nbin
else:
nbin = min(int(nbin),int((k[-1]-0.5*(k[2]+k[1]))/dk+2.5))
dk = (k[-1]-0.5*(k[2]+k[1]))/(nbin-2.5)
binbounds = np.r_[0.5*(3*k[1]-k[2]),0.5*(k[1]+k[2])+dk*np.arange(nbin-2)]
if(log):
binbounds = np.exp(binbounds)
## reordering
reorder = np.searchsorted(binbounds,kindex)
rho_ = np.zeros(len(binbounds)+1,dtype=rho.dtype)
kindex_ = np.empty(len(binbounds)+1,dtype=kindex.dtype)
for ii in range(len(reorder)):
if(rho_[reorder[ii]]==0):
kindex_[reorder[ii]] = kindex[ii]
rho_[reorder[ii]] += rho[ii]
else:
kindex_[reorder[ii]] = (kindex_[reorder[ii]]*rho_[reorder[ii]]+kindex[ii]*rho[ii])/(rho_[reorder[ii]]+rho[ii])
rho_[reorder[ii]] += rho[ii]
return reorder[pindex],kindex_,rho_
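## Usage sketch (added for illustration; not part of the original module):
## rebin the raw power indices onto (at most) eight logarithmic bins.
def _example_rebin_indices():
    axes = np.array([32,32])
    dgrid = np.array([1/32.,1/32.])
    pindex,kindex,rho = get_power_indices(axes,dgrid,np.array([True,True]),fourier=False)
    return bin_power_indices(pindex,kindex,rho,log=True,nbin=8)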
def nhermitianize(field,zerocentered):
"""
Hermitianizes an arbitrary n-dimensional field. Becomes relatively slow
for large n.
Parameters
----------
field : ndarray
The input field that should be hermitianized.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
Returns
-------
hermfield : ndarray
The hermitianized field.
"""
## shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field, axes=shiftaxes(zerocentered))
# for index in np.ndenumerate(field):
# negind = tuple(-np.array(index[0]))
# field[negind] = np.conjugate(index[1])
# if(field[negind]==field[index[0]]):
# field[index[0]] = np.abs(index[1])*(np.sign(index[1].real)+(np.sign(index[1].real)==0)*np.sign(index[1].imag)).astype(np.int)
subshape = np.array(field.shape,dtype=np.int) ## == axes
maxindex = subshape//2
    subshape[np.argmax(subshape)] = subshape[np.argmax(subshape)]//2+1 ## ~half largest axis
for ii in np.ndindex(tuple(subshape)):
negii = tuple(-np.array(ii))
field[negii] = np.conjugate(field[ii])
for ii in np.ndindex((2,)*maxindex.size):
index = tuple(ii*maxindex)
field[index] = np.abs(field[index])*(np.sign(field[index].real)+(np.sign(field[index].real)==0)*-np.sign(field[index].imag)).astype(np.int) ## minus since overwritten before
## reshift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return field
def nhermitianize_fast(field,zerocentered,special=False):
"""
Hermitianizes an arbitrary n-dimensional field faster.
Still becomes comparably slow for large n.
Parameters
----------
field : ndarray
The input field that should be hermitianized.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
special : bool, *optional*
Must be True for random fields drawn from Gaussian or pm1
distributions.
Returns
-------
hermfield : ndarray
The hermitianized field.
"""
## shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field, axes=shiftaxes(zerocentered))
dummy = np.conjugate(field)
## mirror conjugate field
for ii in range(field.ndim):
dummy = np.swapaxes(dummy,0,ii)
dummy = np.flipud(dummy)
dummy = np.roll(dummy,1,axis=0)
dummy = np.swapaxes(dummy,0,ii)
if(special): ## special normalisation for certain random fields
field = np.sqrt(0.5)*(field+dummy)
maxindex = np.array(field.shape,dtype=np.int)//2
for ii in np.ndindex((2,)*maxindex.size):
index = tuple(ii*maxindex)
field[index] *= np.sqrt(0.5)
else: ## regular case
field = 0.5*(field+dummy)
## reshift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return field
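## Usage sketch (added for illustration; not part of the original module):
## hermitianizing a complex random field makes its inverse FFT real up to
## numerical noise.
def _example_hermitianize():
    field = np.random.normal(size=(8,8))+1j*np.random.normal(size=(8,8))
    herm = nhermitianize_fast(field,np.array([False,False]))
    return np.max(np.abs(np.fft.ifftn(herm).imag)) ## ~0 for hermitian input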
def random_hermitian_pm1(datatype,zerocentered,shape):
"""
Draws a set of hermitianized random, complex pm1 numbers.
"""
field = np.random.randint(4,high=None,size=np.prod(shape,axis=0,dtype=np.int,out=None)).reshape(shape,order='C')
dummy = np.copy(field)
## mirror field
for ii in range(field.ndim):
dummy = np.swapaxes(dummy,0,ii)
dummy = np.flipud(dummy)
dummy = np.roll(dummy,1,axis=0)
dummy = np.swapaxes(dummy,0,ii)
field = (field+dummy+2*(field>dummy)*((field+dummy)%2))%4 ## wicked magic
x = np.array([1+0j,0+1j,-1+0j,0-1j],dtype=datatype)[field]
## (re)shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return x
#-----------------------------------------------------------------------------
# Auxiliary functions
#-----------------------------------------------------------------------------
def shiftaxes(zerocentered,st_to_zero_mode=False):
"""
Shifts the axes in a special way needed for some functions
"""
axes = []
for ii in range(len(zerocentered)):
if(st_to_zero_mode==False)and(zerocentered[ii]):
axes += [ii]
if(st_to_zero_mode==True)and(not zerocentered[ii]):
axes += [ii]
return axes
def nkdict(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/axes[i]/dgrid[i] for i in range(len(axes))])
kdict = np.empty(axes)
for ii in np.ndindex(kdict.shape):
kdict[ii] = np.sqrt(np.sum(((ii-axes//2)*dk)**2))
return kdict
def nkdict_fast(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
temp_vecs = np.array(np.where(np.ones(axes)),dtype='float').reshape(np.append(len(axes),axes))
temp_vecs = np.rollaxis(temp_vecs,0,len(temp_vecs.shape))
temp_vecs -= axes//2
temp_vecs *= dk
temp_vecs *= temp_vecs
return np.sqrt(np.sum((temp_vecs),axis=-1))
def nkdict_fast2(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
inds = []
for a in axes:
inds += [slice(0,a)]
cords = np.ogrid[inds]
dists = ((cords[0]-axes[0]//2)*dk[0])**2
for ii in range(1,len(axes)):
dists = dists + ((cords[ii]-axes[ii]//2)*dk[ii])**2
dists = np.sqrt(dists)
return dists
def nklength(kdict):
return np.sort(list(set(kdict.flatten())))
#def drawherm(vector,klength,kdict,ps): ## vector = np.zeros(kdict.shape,dtype=np.complex)
# for ii in np.ndindex(vector.shape):
# if(vector[ii]==np.complex(0.,0.)):
# vector[ii] = np.sqrt(0.5*ps[np.searchsorted(klength,kdict[ii])])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# negii = tuple(-np.array(ii))
# vector[negii] = np.conjugate(vector[ii])
# if(vector[negii]==vector[ii]):
# vector[ii] = np.float(np.sqrt(ps[klength==kdict[ii]]))*np.random.normal(0.,1.)
# return vector
def drawherm(klength,kdict,ps):
"""
Draws a hermitian random field from a Gaussian distribution.
"""
# vector = np.zeros(kdict.shape,dtype='complex')
# for ii in np.ndindex(vector.shape):
# if(vector[ii]==np.complex(0.,0.)):
# vector[ii] = np.sqrt(0.5*ps[np.searchsorted(klength,kdict[ii])])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# negii = tuple(-np.array(ii))
# vector[negii] = np.conjugate(vector[ii])
# if(vector[negii]==vector[ii]):
# vector[ii] = np.float(np.sqrt(ps[np.searchsorted(klength,kdict[ii])]))*np.random.normal(0.,1.)
# return vector
vec = np.random.normal(loc=0,scale=1,size=kdict.size).reshape(kdict.shape)
vec = np.fft.fftn(vec)/np.sqrt(np.prod(kdict.shape))
for ii in np.ndindex(kdict.shape):
vec[ii] *= np.sqrt(ps[np.searchsorted(klength,kdict[ii])])
return vec
#def drawwild(vector,klength,kdict,ps,real_corr=1): ## vector = np.zeros(kdict.shape,dtype=np.complex)
# for ii in np.ndindex(vector.shape):
# vector[ii] = np.sqrt(real_corr*0.5*ps[klength==kdict[ii]])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# return vector
def drawwild(klength,kdict,ps,real_corr=1):
"""
Draws a field of arbitrary symmetry from a Gaussian distribution.
"""
vec = np.empty(kdict.size,dtype=np.complex)
vec.real = np.random.normal(loc=0,scale=np.sqrt(real_corr*0.5),size=kdict.size)
vec.imag = np.random.normal(loc=0,scale=np.sqrt(real_corr*0.5),size=kdict.size)
vec = vec.reshape(kdict.shape)
for ii in np.ndindex(kdict.shape):
vec[ii] *= np.sqrt(ps[np.searchsorted(klength,kdict[ii])])
return vec
| gpl-3.0 | 8,155,718,674,426,123,000 | 33.703655 | 181 | 0.600271 | false |
fnordahl/nova | nova/exception.py | 1 | 56858 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
import webob.exc
from webob import util as woutil
from nova.i18n import _, _LE
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code, title="", explanation=""):
self.code = code
# There is a strict rule about constructing status line for HTTP:
# '...Status-Line, consisting of the protocol version followed by a
# numeric status code and its associated textual phrase, with each
# element separated by SP characters'
# (http://www.faqs.org/rfcs/rfc2616.html)
# 'code' and 'title' can not be empty because they correspond
# to numeric status code and its associated text
if title:
self.title = title
else:
try:
self.title = woutil.status_reasons[self.code]
except KeyError:
msg = _LE("Improper or unknown HTTP status code used: %d")
LOG.error(msg, code)
self.title = woutil.status_generic_reasons[self.code // 100]
self.explanation = explanation
super(ConvertedException, self).__init__()
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return {k: v for k, v in six.iteritems(original) if "_pass" not in k}
def wrap_exception(notifier=None, get_notifier=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It also optionally sends the exception to the notification
system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier or get_notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, context,
*args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
event_type = f.__name__
(notifier or get_notifier()).error(context,
event_type,
payload)
return functools.wraps(f)(wrapped)
return inner
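# Illustrative sketch (not part of nova): a manager-style method wrapped so
# that unexpected exceptions are reported to a notifier before re-raising.
# _SketchNotifier and _SketchManager exist only for this example.
class _SketchNotifier(object):
    def error(self, context, event_type, payload):
        LOG.error("notified %s: %s", event_type, payload)
class _SketchManager(object):
    @wrap_exception(get_notifier=_SketchNotifier)
    def do_work(self, context, fail=False):
        if fail:
            raise NovaException()
        return 'done'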
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value)) # noqa
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
else:
# at least get the core message out if something happened
message = self.msg_fmt
self.message = message
super(NovaException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
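# Illustrative sketch (not part of nova): msg_fmt interpolation happens in
# __init__, so raising a subclass with keyword arguments fills the template.
# InstanceNotRunning is one of the subclasses defined further down.
def _sketch_format_message(instance_id='inst-0001'):
    exc = InstanceNotRunning(instance_id=instance_id)
    return exc.format_message()  # "Instance inst-0001 is not running."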
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class RevokeCertFailure(NovaException):
msg_fmt = _("Failed to revoke certificate for %(project_id)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class VirtualInterfacePlugException(NovaException):
msg_fmt = _("Virtual interface plugin failed")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class Forbidden(NovaException):
ec2_code = 'AuthFailure'
msg_fmt = _("Not authorized.")
code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class VolumeLimitExceeded(Forbidden):
msg_fmt = _("Volume resource quota exceeded")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
ec2_code = 'IncorrectState'
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidBDMVolumeNotBootable(InvalidBDM):
msg_fmt = _("Block Device %(id)s is not bootable.")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts. And its status is %(volume_status)s.")
class VolumeEncryptionNotSupported(Invalid):
msg_fmt = _("Volume encryption is not supported for %(volume_type)s "
"volume %(volume_id)s")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode: %(access_mode)s")
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidAPIVersionString(Invalid):
msg_fmt = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
msg_fmt = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidAggregateActionAdd(InvalidAggregateAction):
msg_fmt = _("Cannot add host to aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionDelete(InvalidAggregateAction):
msg_fmt = _("Cannot remove host from aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdate(InvalidAggregateAction):
msg_fmt = _("Cannot update aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
msg_fmt = _("Cannot update metadata of aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InvalidStrTime(Invalid):
msg_fmt = _("Invalid datetime string: %(reason)s")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class InvalidFixedIpAndMaxCountRequest(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
    msg_fmt = _("The supplied hypervisor type is invalid.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class ServiceTooOld(Invalid):
msg_fmt = _("This service is older (v%(thisver)i) than the minimum "
"(v%(minver)i) version of the rest of the deployment. "
"Unable to continue.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolume.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class BDMNotFound(NotFound):
msg_fmt = _("No Block Device Mapping with id %(id)s.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class VolumeBDMPathNotFound(VolumeBDMNotFound):
msg_fmt = _("No volume Block Device Mapping at path: %(path)s")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshot.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class InstanceMappingNotFound(NotFound):
msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkDhcpReleaseFailed(NovaException):
msg_fmt = _("Failed to release IP %(address)s with MAC %(mac_address)s")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkSetHostFailed(NovaException):
msg_fmt = _("Network set host failed for network %(network_id)s.")
class NetworkNotCreated(Invalid):
msg_fmt = _("%(req)s is required to create a network.")
class LabelTooLong(Invalid):
msg_fmt = _("Maximum allowed length for 'label' is 255.")
class InvalidIntValue(Invalid):
msg_fmt = _("%(key)s must be an integer.")
class InvalidCidr(Invalid):
msg_fmt = _("%(cidr)s is not a valid ip network.")
class InvalidAddress(Invalid):
msg_fmt = _("%(address)s is not a valid ip address.")
class AddressOutOfRange(Invalid):
msg_fmt = _("%(address)s is not within %(cidr)s.")
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
code = 409
class CidrConflict(NovaException):
msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
'with existing cidr (%(other)s)')
code = 409
class NetworkHasProject(NetworkInUse):
msg_fmt = _('Network must be disassociated from project '
'%(project_id)s before it can be deleted.')
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NetworkNotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to.")
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class NetworkMissingPhysicalNetwork(NovaException):
msg_fmt = _("Physical network is missing for network %(network_uuid)s")
class VifDetailsMissingVhostuserSockPath(Invalid):
msg_fmt = _("vhostuser_sock_path not present in vif_details"
" for vif %(vif_id)s")
class VifDetailsMissingMacvtapParameters(Invalid):
msg_fmt = _("Parameters %(missing_params)s not present in"
" vif_details for vif %(vif_id)s. Check your Neutron"
" configuration to validate that the macvtap parameters are"
" correct.")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class PortBindingFailed(Invalid):
msg_fmt = _("Binding failed for port %(port_id)s, please check neutron "
"logs for more information.")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed ip %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAssociateFailed(NovaException):
msg_fmt = _("Fixed IP associate failed for network: %(net)s.")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("No fixed IP addresses available for network: %(net)s")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed ips could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Interface %(interface)s not found.")
class FloatingIpAllocateFailed(NovaException):
msg_fmt = _("Floating IP allocate failed.")
class FloatingIpAssociateFailed(NovaException):
msg_fmt = _("Floating IP %(address)s association has failed.")
class FloatingIpBadRequest(Invalid):
ec2_code = "UnsupportedOperation"
msg_fmt = _("The floating IP request failed with a BadRequest")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Cannot disassociate auto assigned floating ip")
class KeypairNotFound(NotFound):
ec2_code = 'InvalidKeyPair.NotFound'
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class ComputeHostNotCreated(HostNotFound):
msg_fmt = _("Compute host %(name)s needs to be created first"
" before updating.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class InvalidQuotaMethodUsage(Invalid):
msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
ec2_code = 'InvalidGroup.Duplicate'
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
msg_fmt = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class SecurityGroupRuleExists(Invalid):
ec2_code = 'InvalidPermission.Duplicate'
msg_fmt = _("Rule already exists in group: %(rule)s")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
msg_fmt = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class FlavorExtraSpecUpdateCreateFailed(NovaException):
msg_fmt = _("Flavor %(id)d extra spec cannot be updated or created "
"after %(retries)d retries.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class InstanceTagNotFound(NotFound):
msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'")
class RotationRequiredForBackup(NovaException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(NovaException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("The created instance's disk would be too small.")
class FlavorDiskSmallerThanImage(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk "
"is %(flavor_size)i bytes, image is %(image_size)i bytes.")
class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is smaller than the minimum size specified in "
"image metadata. Flavor disk is %(flavor_size)i bytes, "
"minimum size is %(image_min_disk)i bytes.")
class VolumeSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Volume is smaller than the minimum size specified in image "
"metadata. Volume size is %(volume_size)i bytes, minimum "
"size is %(image_min_disk)i bytes.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class MaxRetriesExceeded(NoValidHost):
msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
# NOTE(cyeoh): 413 should only be used for the ec2 API
# The error status code for out of quota for the nova api should be
# 403 Forbidden.
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(NovaException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class InvalidAssociation(NotFound):
ec2_code = 'InvalidAssociationID.NotFound'
msg_fmt = _("Invalid association.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(instance_id)s (expecting \"i-...\")")
ec2_code = 'InvalidInstanceID.Malformed'
class InvalidVolumeIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(volume_id)s (expecting \"i-...\")")
ec2_code = 'InvalidVolumeID.Malformed'
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to "
"%(instance_uuid)s")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from "
"%(instance_uuid)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class InstanceUpdateConflict(NovaException):
msg_fmt = _("Conflict updating instance %(instance_uuid)s. "
"Expected: %(expected)s. Actual: %(actual)s")
class UnknownInstanceUpdateConflict(InstanceUpdateConflict):
msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were "
"unable to determine the cause")
class UnexpectedTaskStateError(InstanceUpdateConflict):
pass
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class ServiceGroupUnavailable(NovaException):
msg_fmt = _("The service from servicegroup driver %(driver)s is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class CoreAPIMissing(NovaException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class InstanceGroupSaveException(NovaException):
msg_fmt = _("%(field)s should not be part of the updates.")
class PluginRetriesExceeded(NovaException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceInvalidAddressField(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI address %(address)s has an invalid %(field)s.")
class PciDeviceInvalidDeviceName(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI whitelist can specify devname or address,"
" but not both")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NotFound):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request (%requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(Invalid):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(NovaException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoLiveMigrationForConfigDriveInLibVirt(NovaException):
msg_fmt = _("Live migration of instances with config drives is not "
"supported in libvirt unless libvirt instance path and "
"drive data is shared across compute nodes.")
class LiveMigrationWithOldNovaNotSafe(NovaException):
msg_fmt = _("Host %(server)s is running an old version of Nova, "
"live migrations involving that version may cause data loss. "
"Upgrade Nova on %(server)s and try again.")
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
class ImageVCPULimitsRangeExceeded(Invalid):
msg_fmt = _("Image vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPUTopologyRangeExceeded(Invalid):
msg_fmt = _("Image vCPU topology %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPULimitsRangeImpossible(Invalid):
msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"are impossible to satisfy for vcpus count %(vcpus)d")
class InvalidArchitectureName(Invalid):
msg_fmt = _("Architecture name '%(arch)s' is not recognised")
class ImageNUMATopologyIncomplete(Invalid):
msg_fmt = _("CPU and memory allocation must be provided for all "
"NUMA nodes")
class ImageNUMATopologyForbidden(Forbidden):
msg_fmt = _("Image property '%(name)s' is not permitted to override "
"NUMA configuration set against the flavor")
class ImageNUMATopologyAsymmetric(Invalid):
msg_fmt = _("Asymmetric NUMA topologies require explicit assignment "
"of CPUs and memory to nodes in image or flavor")
class ImageNUMATopologyCPUOutOfRange(Invalid):
msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
class ImageNUMATopologyCPUDuplicates(Invalid):
msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
class ImageNUMATopologyCPUsUnassigned(Invalid):
msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
class ImageNUMATopologyMemoryOutOfRange(Invalid):
msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
"%(memtotal)d MB")
class InvalidHostname(Invalid):
msg_fmt = _("Invalid characters in hostname '%(hostname)s'")
class NumaTopologyNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology")
class MigrationContextNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a migration "
"context.")
class SocketPortRangeExhaustedException(NovaException):
msg_fmt = _("Not able to acquire a free port for %(host)s")
class SocketPortInUseException(NovaException):
msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s")
class ImageSerialPortNumberInvalid(Invalid):
msg_fmt = _("Number of serial ports '%(num_ports)s' specified in "
"'%(property)s' isn't valid.")
class ImageSerialPortNumberExceedFlavorValue(Invalid):
msg_fmt = _("Forbidden to exceed flavor value of number of serial "
"ports passed in image meta.")
class InvalidImageConfigDrive(Invalid):
msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
class InvalidHypervisorVirtType(Invalid):
msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not "
"recognised")
class InvalidVirtualMachineMode(Invalid):
msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised")
class InvalidToken(Invalid):
msg_fmt = _("The token '%(token)s' is invalid or has expired")
class InvalidConnectionInfo(Invalid):
msg_fmt = _("Invalid Connection Info")
class InstanceQuiesceNotSupported(Invalid):
msg_fmt = _('Quiescing is not supported in instance %(instance_id)s')
class QemuGuestAgentNotEnabled(Invalid):
msg_fmt = _('QEMU guest agent is not enabled')
class SetAdminPasswdNotSupported(Invalid):
msg_fmt = _('Set admin password is not supported')
class MemoryPageSizeInvalid(Invalid):
msg_fmt = _("Invalid memory page size '%(pagesize)s'")
class MemoryPageSizeForbidden(Invalid):
msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'")
class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
class CPUPinningNotSupported(Invalid):
msg_fmt = _("CPU pinning is not supported by the host: "
"%(reason)s")
class CPUPinningInvalid(Invalid):
msg_fmt = _("Cannot pin/unpin cpus %(requested)s from the following "
"pinned set %(pinned)s")
class CPUPinningUnknown(Invalid):
msg_fmt = _("CPU set to pin/unpin %(requested)s must be a subset of "
"known CPU set %(cpuset)s")
class ImageCPUPinningForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
"CPU pinning policy set against the flavor")
class UnsupportedPolicyException(Invalid):
msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
class CellMappingNotFound(NotFound):
msg_fmt = _("Cell %(uuid)s has no mapping.")
class NUMATopologyUnsupported(Invalid):
msg_fmt = _("Host does not support guests with NUMA topology set")
class MemoryPagesUnsupported(Invalid):
msg_fmt = _("Host does not support guests with custom memory page sizes")
class EnumFieldInvalid(Invalid):
msg_fmt = _('%(typename)s in %(fieldname)s is not an instance of Enum')
class EnumFieldUnset(Invalid):
msg_fmt = _('%(fieldname)s missing field type')
class InvalidImageFormat(Invalid):
msg_fmt = _("Invalid image format '%(format)s'")
class UnsupportedImageModel(Invalid):
msg_fmt = _("Image model '%(image)s' is not supported")
class HostMappingNotFound(Invalid):
msg_fmt = _("Host '%(name)s' is not mapped to any cell")
| apache-2.0 | -6,969,698,482,296,145,000 | 28.09826 | 79 | 0.672852 | false |
google/ion | ion/dev/doxygen_filter.py | 1 | 8299 | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Doxygen pre-filter script for ion.
This filter processes code and adds Doxygen-compatible markup in various places
to enable Doxygen to read the docs more fully. Unlike some other Doxygen
filters, it is designed to work with Doxygen's newer markdown syntax.
In order to ensure proper syntax coloring of indented code blocks, make sure
there is a blank (commented) line both above and below the block. For example:
// Comment comment comment.
//
// int CodeBlock() {
// Goes here;
// }
//
// More comment.
"""
import re
import sys
class DoxygenFormatter(object):
"""Transforms lines of a source file to make them doxygen-friendly."""
ANYWHERE = 'anywhere'
COMMENT = 'comment'
def __init__(self, outfile):
# The file-like object to which we will write lines.
self.out = outfile
# A buffer for storing empty lines which we can use later if we need to
# retroactively insert markup without causing line number offset problems.
self.empty_line_buffer = []
# Whether we are currently inside an indented code block.
self.in_code_block = False
self.CompileExpressions()
def CompileExpressions(self):
"""Pre-compiles frequently used regexps for improved performance.
The regexps are arranged as a list of 3-tuples, where the second value is
the replacement string (which may include backreferences) and the third
value is one of the context constants ANYWHERE or COMMENT. This is a list
of tuples instead of a dictionary because order matters: earlier regexps
will be applied first, and the resulting text (not the original) will be
what is seen by subsequent regexps.
"""
self.comment_regex = re.compile(r'^\s*//')
self.substitutions = [
# Remove copyright lines.
(re.compile(r'^\s*//\s*[Cc]opyright.*Google.*'), r'', self.ANYWHERE),
# Remove any comment lines that consist of only punctuation (banners).
# We only allow a maximum of two spaces before the punctuation so we
# don't accidentally get rid of code examples with bare braces and
# whatnot.
(re.compile(r'(^\s*)//\s{0,2}[-=#/]+$'), r'\1//\n', self.ANYWHERE),
# If we find something that looks like a list item that is indented four
# or more spaces, pull it back to the left so doxygen's Markdown engine
# doesn't treat it like a code block.
(re.compile(r'(^\s*)//\s{4,}([-\d*].*)'), r'\1 \2', self.COMMENT),
(re.compile(r'TODO'), r'@todo ', self.COMMENT),
# Replace leading 'Note:' or 'Note that' in a comment with @note
(re.compile(r'(\/\/\s+)Note(?:\:| that)', re.I), r'\1@note',
self.COMMENT),
# Replace leading 'Warning:' in a comment with @warning
(re.compile(r'(\/\/\s+)Warning:', re.I), r'\1@warning', self.COMMENT),
# Replace leading 'Deprecated' in a comment with @deprecated
(re.compile(r'(\/\/\s+)Deprecated[^\w\s]*', re.I), r'\1@deprecated',
self.COMMENT),
# Replace pipe-delimited parameter names with backtick-delimiters
(re.compile(r'\|(\w+)\|'), r'`\1`', self.COMMENT),
# Convert standalone comment lines to Doxygen style.
(re.compile(r'(^\s*)//(?=[^/])'), r'\1///', self.ANYWHERE),
# Strip trailing comments from preprocessor directives.
(re.compile(r'(^#.*)//.*'), r'\1', self.ANYWHERE),
# Convert remaining trailing comments to doxygen style, unless they are
# documenting the end of a block.
(re.compile(r'([^} ]\s+)//(?=[^/])'), r'\1///<', self.ANYWHERE),
]
def Transform(self, line):
"""Performs the regexp transformations defined by self.substitutions.
Args:
line: The line to transform.
Returns:
The resulting line.
"""
for (regex, repl, where) in self.substitutions:
      if where is self.COMMENT and not self.comment_regex.match(line):
        # Comment-only patterns do not apply to this line; keep checking the
        # remaining (ANYWHERE) substitutions instead of bailing out early.
        continue
      line = regex.sub(repl, line)
return line
def AppendToBufferedLine(self, text):
"""Appends text to the last buffered empty line.
Empty lines are buffered rather than being written out directly. This lets
us retroactively rewrite buffered lines to include markup that affects the
following line, while avoiding the line number offset that would result from
inserting a line that wasn't in the original source.
Args:
text: The text to append to the line.
Returns:
True if there was an available empty line to which text could be
appended, and False otherwise.
"""
if self.empty_line_buffer:
last_line = self.empty_line_buffer.pop().rstrip()
last_line += text + '\n'
self.empty_line_buffer.append(last_line)
return True
else:
return False
def ConvertCodeBlock(self, line):
"""Converts any code block that may begin or end on this line.
Doxygen has (at least) two kinds of code blocks. Any block indented at
least four spaces gets formatted as code, but (for some reason) no syntax
highlighting is applied. Any block surrounded by "~~~" on both sides is
also treated as code, but these are syntax highlighted intelligently
depending on the file type. We typically write code blocks in the former
style, but we'd like them to be highlighted, so this function converts them
to the latter style by adding in the ~~~ lines.
To make this a bit more complicated, we would really prefer not to insert
new lines into the file, since that will make the line numbers shown in
doxygen not match the line numbers in the actual source code. For this
reason, we only perform the conversion if at least one "blank" line (empty
comment line) appears before the start of the code block. If we get down to
the bottom of the block and there's no blank line after it, we will be
forced to add a line, since we can't go back and undo what we already did.
Args:
line: The line to process.
Returns:
The converted line.
"""
if not self.in_code_block and re.match(r'\s*///\s{4,}', line):
if self.AppendToBufferedLine(' ~~~'):
# If this fails, we'll just leave it un-highlighted.
self.in_code_block = True
elif self.in_code_block and not re.match(r'\s*///\s{4,}', line):
if not self.AppendToBufferedLine(' ~~~'):
# This is bad. We don't have a buffered line to use to end the code
# block, so we'll have to insert one. This will cause the line
# numbers to stop matching the original source, unfortunately.
line = '/// ~~~\n' + line
self.in_code_block = False
return line
def ProcessLine(self, line):
"""Processes a line.
If the line is an empty line inside a comment, we buffer it for possible
rewriting later on. Otherwise, we transform it using our regexps and
write it (as well as any buffered blank lines) out to the output.
Args:
line: The line to process.
"""
line = self.Transform(line)
if line.strip() == '///':
# We may repurpose this empty line later, so don't write it out yet.
self.empty_line_buffer.append(line)
else:
line = self.ConvertCodeBlock(line)
# Flush the line buffer and write this line as well.
for buffered_line in self.empty_line_buffer:
self.out.write(buffered_line)
self.empty_line_buffer = []
self.out.write(line)
def main(argv):
sourcefile = argv[1]
with open(sourcefile, 'r') as infile:
formatter = DoxygenFormatter(sys.stdout)
for line in infile:
formatter.ProcessLine(line)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | 6,935,754,025,838,835,000 | 35.884444 | 80 | 0.662369 | false |
FlannelFox/FlannelFox | tests/flannelfox/torrenttools/test_torrentQueue.py | 1 | 1999 | # -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch
import os
from flannelfox.torrenttools.TorrentQueue import Queue
from flannelfox.torrenttools import Torrents
class TestTorrentQueue(unittest.TestCase):
testDatabaseFile = 'ff.db'
def removeDatabase(self):
try:
os.remove(self.testDatabaseFile)
except Exception:
pass
@patch.object(Queue, 'databaseTorrentBlacklisted')
@patch.object(Queue, 'databaseTorrentExists')
def test_Queue(self, mockDatabaseTorrentExists, mockDatabaseTorrentBlacklisted):
self.removeDatabase()
torrentQueue = Queue()
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = False
# Ensure len returns a valid answer
self.assertEqual(len(torrentQueue), 0)
# Make sure appending an item works
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e01.720p.junk.here'))
self.assertEqual(len(torrentQueue), 1)
# Make sure appending a duplicate item does not work
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e01.720p.junk.here'))
self.assertEqual(len(torrentQueue), 1)
# Add a different item and make sure it works
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here2'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = True
mockDatabaseTorrentExists.return_value = False
# Check if Blacklisted torrent gets blocked
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here3'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = True
# Check if Existing Torrent in Database gets blocked
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here3'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = False
if __name__ == '__main__':
unittest.main()
| mit | -7,580,618,446,876,378,000 | 29.753846 | 83 | 0.78039 | false |
vhosouza/invesalius3 | invesalius/gui/task_exporter.py | 1 | 15556 | #--------------------------------------------------------------------------
# Software: InVesalius - 3D Medical Image Reconstruction Software
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, under version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#--------------------------------------------------------------------------
import os
import pathlib
import sys
import wx
try:
import wx.lib.agw.hyperlink as hl
except ImportError:
import wx.lib.hyperlink as hl
import wx.lib.platebtn as pbtn
from pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.gui.dialogs as dlg
import invesalius.project as proj
import invesalius.session as ses
from invesalius import inv_paths
BTN_MASK = wx.NewId()
BTN_PICTURE = wx.NewId()
BTN_SURFACE = wx.NewId()
BTN_REPORT = wx.NewId()
BTN_REQUEST_RP = wx.NewId()
WILDCARD_SAVE_3D = "Inventor (*.iv)|*.iv|"\
"PLY (*.ply)|*.ply|"\
"Renderman (*.rib)|*.rib|"\
"STL (*.stl)|*.stl|"\
"STL ASCII (*.stl)|*.stl|"\
"VRML (*.vrml)|*.vrml|"\
"VTK PolyData (*.vtp)|*.vtp|"\
"Wavefront (*.obj)|*.obj|"\
"X3D (*.x3d)|*.x3d"
INDEX_TO_TYPE_3D = {0: const.FILETYPE_IV,
1: const.FILETYPE_PLY,
2: const.FILETYPE_RIB,
3: const.FILETYPE_STL,
4: const.FILETYPE_STL_ASCII,
5: const.FILETYPE_VRML,
6: const.FILETYPE_VTP,
7: const.FILETYPE_OBJ,
8: const.FILETYPE_X3D}
INDEX_TO_EXTENSION = {0: "iv",
1: "ply",
2: "rib",
3: "stl",
4: "stl",
5: "vrml",
6: "vtp",
7: "obj",
8: "x3d"}
WILDCARD_SAVE_2D = "BMP (*.bmp)|*.bmp|"\
"JPEG (*.jpg)|*.jpg|"\
"PNG (*.png)|*.png|"\
"PostScript (*.ps)|*.ps|"\
"Povray (*.pov)|*.pov|"\
"TIFF (*.tiff)|*.tiff"
INDEX_TO_TYPE_2D = {0: const.FILETYPE_BMP,
1: const.FILETYPE_JPG,
2: const.FILETYPE_PNG,
3: const.FILETYPE_PS,
4: const.FILETYPE_POV,
5: const.FILETYPE_OBJ}
WILDCARD_SAVE_MASK = "VTK ImageData (*.vti)|*.vti"
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(inner_panel, 1, wx.EXPAND | wx.GROW | wx.BOTTOM | wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
backgroud_colour = wx.Colour(255,255,255)
self.SetBackgroundColour(backgroud_colour)
self.SetAutoLayout(1)
# Counter for projects loaded in current GUI
# Fixed hyperlink items
tooltip = wx.ToolTip(_("Export InVesalius screen to an image file"))
link_export_picture = hl.HyperLinkCtrl(self, -1,
_("Export picture..."))
link_export_picture.SetUnderlines(False, False, False)
link_export_picture.SetBold(True)
link_export_picture.SetColours("BLACK", "BLACK", "BLACK")
link_export_picture.SetBackgroundColour(self.GetBackgroundColour())
link_export_picture.SetToolTip(tooltip)
link_export_picture.AutoBrowse(False)
link_export_picture.UpdateLink()
link_export_picture.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportPicture)
tooltip = wx.ToolTip(_("Export 3D surface"))
link_export_surface = hl.HyperLinkCtrl(self, -1,_("Export 3D surface..."))
link_export_surface.SetUnderlines(False, False, False)
link_export_surface.SetBold(True)
link_export_surface.SetColours("BLACK", "BLACK", "BLACK")
link_export_surface.SetBackgroundColour(self.GetBackgroundColour())
link_export_surface.SetToolTip(tooltip)
link_export_surface.AutoBrowse(False)
link_export_surface.UpdateLink()
link_export_surface.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportSurface)
#tooltip = wx.ToolTip(_("Export 3D mask (voxels)"))
#link_export_mask = hl.HyperLinkCtrl(self, -1,_("Export mask..."))
#link_export_mask.SetUnderlines(False, False, False)
#link_export_mask.SetColours("BLACK", "BLACK", "BLACK")
#link_export_mask.SetToolTip(tooltip)
#link_export_mask.AutoBrowse(False)
#link_export_mask.UpdateLink()
#link_export_mask.Bind(hl.EVT_HYPERLINK_LEFT,
# self.OnLinkExportMask)
#tooltip = wx.ToolTip("Request rapid prototyping services")
#link_request_rp = hl.HyperLinkCtrl(self,-1,"Request rapid prototyping...")
#link_request_rp.SetUnderlines(False, False, False)
#link_request_rp.SetColours("BLACK", "BLACK", "BLACK")
#link_request_rp.SetToolTip(tooltip)
#link_request_rp.AutoBrowse(False)
#link_request_rp.UpdateLink()
#link_request_rp.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkRequestRP)
#tooltip = wx.ToolTip("Open report tool...")
#link_report = hl.HyperLinkCtrl(self,-1,"Open report tool...")
#link_report.SetUnderlines(False, False, False)
#link_report.SetColours("BLACK", "BLACK", "BLACK")
#link_report.SetToolTip(tooltip)
#link_report.AutoBrowse(False)
#link_report.UpdateLink()
#link_report.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkReport)
# Image(s) for buttons
if sys.platform == 'darwin':
BMP_EXPORT_SURFACE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "surface_export_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "tool_photo_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask.png",
# wx.BITMAP_TYPE_PNG)
else:
BMP_EXPORT_SURFACE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "surface_export.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "tool_photo.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask_small.png",
# wx.BITMAP_TYPE_PNG)
# Buttons related to hyperlinks
button_style = pbtn.PB_STYLE_SQUARE | pbtn.PB_STYLE_DEFAULT
button_picture = pbtn.PlateButton(self, BTN_PICTURE, "",
BMP_TAKE_PICTURE,
style=button_style)
button_picture.SetBackgroundColour(self.GetBackgroundColour())
self.button_picture = button_picture
button_surface = pbtn.PlateButton(self, BTN_SURFACE, "",
BMP_EXPORT_SURFACE,
style=button_style)
button_surface.SetBackgroundColour(self.GetBackgroundColour())
#button_mask = pbtn.PlateButton(self, BTN_MASK, "",
# BMP_EXPORT_MASK,
# style=button_style)
#button_request_rp = pbtn.PlateButton(self, BTN_REQUEST_RP, "",
# BMP_IMPORT, style=button_style)
#button_report = pbtn.PlateButton(self, BTN_REPORT, "",
# BMP_IMPORT,
# style=button_style)
# When using PlaneButton, it is necessary to bind events from parent win
self.Bind(wx.EVT_BUTTON, self.OnButton)
# Tags and grid sizer for fixed items
flag_link = wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP
flag_button = wx.EXPAND | wx.GROW
fixed_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=2, vgap=0)
fixed_sizer.AddGrowableCol(0, 1)
fixed_sizer.AddMany([ (link_export_picture, 1, flag_link, 3),
(button_picture, 0, flag_button),
(link_export_surface, 1, flag_link, 3),
(button_surface, 0, flag_button),])
#(link_export_mask, 1, flag_link, 3),
#(button_mask, 0, flag_button)])
#(link_report, 0, flag_link, 3),
#(button_report, 0, flag_button),
#(link_request_rp, 1, flag_link, 3),
#(button_request_rp, 0, flag_button)])
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(fixed_sizer, 0, wx.GROW|wx.EXPAND)
# Update main sizer and panel layout
self.SetSizer(main_sizer)
self.Fit()
self.sizer = main_sizer
self.__init_menu()
def __init_menu(self):
menu = wx.Menu()
self.id_to_name = {const.AXIAL:_("Axial slice"),
const.CORONAL:_("Coronal slice"),
const.SAGITAL:_("Sagittal slice"),
const.VOLUME:_("Volume")}
for id in self.id_to_name:
item = wx.MenuItem(menu, id, self.id_to_name[id])
menu.Append(item)
self.menu_picture = menu
menu.Bind(wx.EVT_MENU, self.OnMenuPicture)
def OnMenuPicture(self, evt):
id = evt.GetId()
value = dlg.ExportPicture(self.id_to_name[id])
if value:
filename, filetype = value
Publisher.sendMessage('Export picture to file',
orientation=id, filename=filename, filetype=filetype)
def OnLinkExportPicture(self, evt=None):
self.button_picture.PopupMenu(self.menu_picture)
def OnLinkExportMask(self, evt=None):
project = proj.Project()
if sys.platform == 'win32':
project_name = project.name
else:
project_name = project.name+".vti"
dlg = wx.FileDialog(None,
"Save mask as...", # title
"", # last used directory
project_name, # filename
WILDCARD_SAVE_MASK,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(0) # default is VTI
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
extension = "vti"
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
filetype = const.FILETYPE_IMAGEDATA
Publisher.sendMessage('Export mask to file',
filename=filename,
filetype=filetype)
def OnLinkExportSurface(self, evt=None):
"OnLinkExportSurface"
project = proj.Project()
n_surface = 0
for index in project.surface_dict:
if project.surface_dict[index].is_shown:
n_surface += 1
if n_surface:
if sys.platform == 'win32':
project_name = pathlib.Path(project.name).stem
else:
project_name = pathlib.Path(project.name).stem + ".stl"
session = ses.Session()
last_directory = session.get('paths', 'last_directory_3d_surface', '')
dlg = wx.FileDialog(None,
_("Save 3D surface as..."), # title
last_directory, # last used directory
project_name, # filename
WILDCARD_SAVE_3D,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(3) # default is STL
if dlg.ShowModal() == wx.ID_OK:
filetype_index = dlg.GetFilterIndex()
filetype = INDEX_TO_TYPE_3D[filetype_index]
filename = dlg.GetPath()
extension = INDEX_TO_EXTENSION[filetype_index]
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
if filename:
session['paths']['last_directory_3d_surface'] = os.path.split(filename)[0]
session.WriteSessionFile()
Publisher.sendMessage('Export surface to file',
filename=filename, filetype=filetype)
if not os.path.exists(filename):
dlg = wx.MessageDialog(None,
_("It was not possible to save the surface."),
_("Error saving surface"),
wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
else:
dlg = wx.MessageDialog(None,
_("You need to create a surface and make it ") +
_("visible before exporting it."),
'InVesalius 3',
wx.OK | wx.ICON_INFORMATION)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def OnLinkRequestRP(self, evt=None):
pass
def OnLinkReport(self, evt=None):
pass
def OnButton(self, evt):
id = evt.GetId()
if id == BTN_PICTURE:
self.OnLinkExportPicture()
elif id == BTN_SURFACE:
self.OnLinkExportSurface()
elif id == BTN_REPORT:
self.OnLinkReport()
elif id == BTN_REQUEST_RP:
self.OnLinkRequestRP()
else:# id == BTN_MASK:
self.OnLinkExportMask()
| gpl-2.0 | -955,293,151,013,029,400 | 39.300518 | 98 | 0.509771 | false |
bmya/tkobr-addons | tko_web_sessions_management/main.py | 1 | 11671 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp.osv import fields, osv, orm
import pytz
from datetime import date, datetime, time, timedelta
from dateutil.relativedelta import *
from openerp.addons.base.ir.ir_cron import _intervalTypes
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.http import request
from openerp.tools.translate import _
from openerp import http
import werkzeug.contrib.sessions
from openerp.http import Response
# from openerp import pooler
_logger = logging.getLogger(__name__)
class Home_tkobr(openerp.addons.web.controllers.main.Home):
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
openerp.addons.web.controllers.main.ensure_db()
multi_ok = True
calendar_set = 0
calendar_ok = False
calendar_group = ''
unsuccessful_message = ''
now = datetime.now()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = False
if 'login' in request.params and 'password' in request.params:
uid = request.session.authenticate(request.session.db, request.params[
'login'], request.params['password'])
if uid is not False:
user = request.registry.get('res.users').browse(
request.cr, request.uid, uid, request.context)
if not uid is SUPERUSER_ID:
# check for multiple sessions block
sessions = request.registry.get('ir.sessions').search(
request.cr, request.uid, [
('user_id', '=', uid), ('logged_in', '=', True)], context=request.context)
if sessions and user.multiple_sessions_block:
multi_ok = False
if multi_ok:
# check calendars
calendar_obj = request.registry.get(
'resource.calendar')
attendance_obj = request.registry.get(
'resource.calendar.attendance')
# GET USER LOCAL TIME
if user.tz:
tz = pytz.timezone(user.tz)
else:
tz = pytz.timezone('GMT')
tzoffset = tz.utcoffset(now)
now = now + tzoffset
if user.login_calendar_id:
calendar_set += 1
# check user calendar
attendances = attendance_obj.search(request.cr,
request.uid, [('calendar_id', '=', user.login_calendar_id.id),
('dayofweek', '=', str(now.weekday())),
('hour_from', '<=', now.hour + now.minute / 60.0),
('hour_to', '>=', now.hour + now.minute / 60.0)],
context=request.context)
if attendances:
calendar_ok = True
else:
unsuccessful_message = "unsuccessful login from '%s', user time out of allowed calendar defined in user" % request.params[
'login']
else:
# check user groups calendar
for group in user.groups_id:
if group.login_calendar_id:
calendar_set += 1
attendances = attendance_obj.search(request.cr,
request.uid, [('calendar_id', '=', group.login_calendar_id.id),
('dayofweek', '=', str(now.weekday())),
('hour_from', '<=', now.hour + now.minute / 60.0),
('hour_to', '>=', now.hour + now.minute / 60.0)],
context=request.context)
if attendances:
calendar_ok = True
else:
calendar_group = group.name
if sessions and group.multiple_sessions_block and multi_ok:
multi_ok = False
unsuccessful_message = "unsuccessful login from '%s', multisessions block defined in group '%s'" % (
request.params['login'], group.name)
break
if calendar_set > 0 and calendar_ok == False:
unsuccessful_message = "unsuccessful login from '%s', user time out of allowed calendar defined in group '%s'" % (
request.params['login'], calendar_group)
else:
unsuccessful_message = "unsuccessful login from '%s', multisessions block defined in user" % request.params[
'login']
else:
unsuccessful_message = "unsuccessful login from '%s', wrong username or password" % request.params[
'login']
if not unsuccessful_message or uid is SUPERUSER_ID:
self.save_session(
request.cr,
uid,
user.tz,
request.httprequest.session.sid,
context=request.context)
return http.redirect_with_hash(redirect)
user = request.registry.get('res.users').browse(
request.cr, SUPERUSER_ID, SUPERUSER_ID, request.context)
self.save_session(
request.cr,
uid,
user.tz,
request.httprequest.session.sid,
unsuccessful_message,
request.context)
_logger.error(unsuccessful_message)
request.uid = old_uid
values['error'] = 'Login failed due to one of the following reasons:'
values['reason1'] = '- Wrong login/password'
values['reason2'] = '- User not allowed to have multiple logins'
values[
'reason3'] = '- User not allowed to login at this specific time or day'
return request.render('web.login', values)
def save_session(
self,
cr,
uid,
tz,
sid,
unsuccessful_message='',
context=None):
now = fields.datetime.now()
session_obj = request.registry.get('ir.sessions')
cr = request.registry.cursor()
# for GeoIP
geo_ip_resolver = None
ip_location = ""
try:
import GeoIP
geo_ip_resolver = GeoIP.open(
'/usr/share/GeoIP/GeoIP.dat',
GeoIP.GEOIP_STANDARD)
except ImportError:
geo_ip_resolver = False
if geo_ip_resolver:
ip_location = (str(geo_ip_resolver.country_name_by_addr(
request.httprequest.remote_addr)) or "")
# autocommit: our single update request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
cr.autocommit(True)
user = request.registry.get('res.users').browse(
cr, request.uid, uid, request.context)
ip = request.httprequest.headers.environ['REMOTE_ADDR']
logged_in = True
if unsuccessful_message:
uid = SUPERUSER_ID
logged_in = False
sessions = False
else:
sessions = session_obj.search(cr, uid, [('session_id', '=', sid),
('ip', '=', ip),
('user_id', '=', uid),
('logged_in', '=', True)],
context=context)
if not sessions:
values = {
'user_id': uid,
'logged_in': logged_in,
'session_id': sid,
'session_seconds': user.session_default_seconds,
'multiple_sessions_block': user.multiple_sessions_block,
'date_login': now,
'expiration_date': datetime.strftime(
(datetime.strptime(
now,
DEFAULT_SERVER_DATETIME_FORMAT) +
relativedelta(
seconds=user.session_default_seconds)),
DEFAULT_SERVER_DATETIME_FORMAT),
'ip': ip,
'ip_location': ip_location,
'remote_tz': tz or 'GMT',
'unsuccessful_message': unsuccessful_message,
}
session_obj.create(cr, uid, values, context=context)
cr.commit()
cr.close()
return True
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True, logout_type='ul')
return werkzeug.utils.redirect(redirect, 303)
| agpl-3.0 | -2,443,623,195,217,171,000 | 46.060484 | 154 | 0.476223 | false |
jlaine/django-coconuts | tests/test_render.py | 1 | 4789 | #
# django-coconuts
# Copyright (c) 2008-2019, Jeremy Lainé
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
from PIL import Image
from tests import BaseTest
class RenderFileTest(BaseTest):
files = ['test.jpg', 'test.mp4', 'test.png', 'test.txt', 'test_portrait.jpg', 'test_portrait.mp4', 'test_rotated.jpg', 'test_rotated.mp4']
fixtures = ['test_users.json']
def assertImage(self, response, content_type, expected_size):
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], content_type)
self.assertTrue('Expires' in response)
self.assertTrue('Last-Modified' in response)
# check size
fp = io.BytesIO(b''.join(response.streaming_content))
img = Image.open(fp)
self.assertEqual(img.size, expected_size)
def test_as_anonymous(self):
"""
Anonymous user cannot render a file.
"""
# no size
response = self.client.get('/images/render/test.jpg')
self.assertEqual(response.status_code, 401)
# bad size
response = self.client.get('/images/render/test.jpg?size=123')
self.assertEqual(response.status_code, 401)
# good size, bad type
response = self.client.get('/images/render/test.txt?size=1024')
self.assertEqual(response.status_code, 401)
# good size, good path
response = self.client.get('/images/render/test.jpg?size=1024')
self.assertEqual(response.status_code, 401)
# good size, good path
response = self.client.get('/images/render/test.png?size=1024')
self.assertEqual(response.status_code, 401)
def test_as_user_bad(self):
"""
Authenticated user can render a file.
"""
self.client.login(username="test_user_1", password="test")
# no size
response = self.client.get('/images/render/test.jpg')
self.assertEqual(response.status_code, 400)
# bad size
response = self.client.get('/images/render/test.jpg?size=123')
self.assertEqual(response.status_code, 400)
# good size, bad path
response = self.client.get('/images/render/notfound.jpg?size=1024')
self.assertEqual(response.status_code, 404)
# good size, bad type
response = self.client.get('/images/render/test.txt?size=1024')
self.assertEqual(response.status_code, 400)
def test_as_user_good(self):
self.client.login(username="test_user_1", password="test")
response = self.client.get('/images/render/test.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (1024, 682))
response = self.client.get('/images/render/test_portrait.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (512, 768))
response = self.client.get('/images/render/test_portrait.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (432, 768))
response = self.client.get('/images/render/test_rotated.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (512, 768))
response = self.client.get('/images/render/test_rotated.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (432, 768))
response = self.client.get('/images/render/test.png?size=1024')
self.assertImage(response, 'image/png', (24, 24))
response = self.client.get('/images/render/test.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (1024, 576))
| bsd-2-clause | -6,299,024,750,959,497,000 | 39.235294 | 142 | 0.676901 | false |
limemadness/selenium_training | test_countries_sort.py | 1 | 2050 | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
#def driver(request):
# wd = webdriver.Firefox(firefox_binary="c:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe")
# print(wd.capabilities)
# request.addfinalizer(wd.quit)
# return wd
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(10)
request.addfinalizer(wd.quit)
return wd
def test_countries_sort(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").click()
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").click()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_xpath("//div[2]/button").click()
driver.get("http://localhost/litecart/admin/?app=countries&doc=countries")
#get country data
countries = driver.find_elements_by_css_selector("#content tr.row")
countries_timezone_url = []
country_name = []
#verify alphabetical order of country names
for country in countries:
country_name.append(country.find_element_by_css_selector("td:nth-child(5)").text)
assert sorted(country_name) == country_name
#get countries with multiple timezones
for country in countries:
if int(country.find_element_by_css_selector("td:nth-child(6)").text) > 0:
countries_timezone_url.append(country.find_element_by_css_selector("td:nth-child(5) a").get_attribute("href"))
#verify alphabetical order of timezones
for country_timezone_url in countries_timezone_url:
driver.get(country_timezone_url)
timezone_list = driver.find_elements_by_css_selector("#table-zones td:nth-child(2)")
del timezone_list[-1:]
timezones = []
for timezone in timezone_list:
timezones.append(timezone.text)
print(timezones)
assert sorted(timezones) == timezones
| apache-2.0 | -2,674,985,192,745,299,500 | 40 | 122 | 0.699024 | false |
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/konlpy/corpus.py | 1 | 1849 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
from . import utils
class CorpusLoader():
"""Loader for corpora.
For a complete list of corpora available in KoNLPy,
refer to :ref:`corpora`.
.. code-block:: python
>>> from konlpy.corpus import kolaw
>>> fids = kolaw.fileids()
>>> fobj = kolaw.open(fids[0])
>>> print fobj.read(140)
대한민국헌법
유구한 역사와 전통에 빛나는 우리 대한국민은 3·1운동으로 건립된 대한민국임시정부의 법통과 불의에 항거한 4·19민주이념을 계승하고, 조국의 민주개혁과 평화적 통일의 사명에 입각하여 정의·인도와 동포애로써 민족의 단결을 공고히 하고, 모든 사회적 폐습과 불의를 타파하며, 자율과 조화를 바 바
"""
def abspath(self, filename=None):
"""Absolute path of corpus file.
If ``filename`` is *None*, returns absolute path of corpus.
:param filename: Name of a particular file in the corpus.
"""
basedir = '%s/data/corpus/%s' % (utils.installpath, self.name)
if filename:
return '%s/%s' % (basedir, filename)
else:
return '%s/' % basedir
def fileids(self):
"""List of file IDs in the corpus."""
return os.listdir(self.abspath())
def open(self, filename):
"""Method to open a file in the corpus.
Returns a file object.
:param filename: Name of a particular file in the corpus.
"""
return utils.load_txt(self.abspath(filename))
def __init__(self, name=None):
if not name:
raise Exception("You need to input the name of the corpus")
else:
self.name = name
kolaw = CorpusLoader('kolaw')
kobill = CorpusLoader('kobill')
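# Editor's note (added): kobill is used exactly like the kolaw example in the class
# docstring, e.g. (assuming the packaged corpus files are installed):
# >>> from konlpy.corpus import kobill
# >>> fids = kobill.fileids()
# >>> print kobill.open(fids[0]).read(100)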
| gpl-3.0 | 3,655,224,125,584,470,500 | 27.035088 | 171 | 0.58761 | false |
ErickMurillo/aprocacaho | organizacion/admin.py | 1 | 3456 | from django.contrib import admin
from .models import *
# Register your models here.
#organizacion
class InlineEscuelaCampo(admin.TabularInline):
model = EscuelaCampo
extra = 1
class OrganizacionAdmin(admin.ModelAdmin):
inlines = [InlineEscuelaCampo]
list_display = ('id','nombre','siglas')
list_display_links = ('id','nombre','siglas')
#encuesta organizacion
class InlineAspectosJuridicos(admin.TabularInline):
model = AspectosJuridicos
max_num = 1
can_delete = False
class InlineListaMiembros(admin.TabularInline):
model = ListaMiembros
extra = 1
class InlineDocumentacion(admin.TabularInline):
model = Documentacion
extra = 1
max_num = 7
class InlineProduccionComercializacion(admin.TabularInline):
model = ProduccionComercializacion
extra = 1
class InlineNivelCumplimiento(admin.TabularInline):
model = NivelCumplimiento
extra = 1
max_num = 7
# class InlineDatosProductivos(admin.TabularInline):
# model = DatosProductivos
# extra = 1
# max_num = 4
#
# class InlineDatosProductivosTabla(admin.TabularInline):
# model = DatosProductivosTabla
# extra = 1
# max_num = 2
class InlineInfraestructura(admin.TabularInline):
model = Infraestructura
extra = 1
class InlineTransporte(admin.TabularInline):
model = Transporte
max_num = 1
can_delete = False
# class InlineComercializacion(admin.TabularInline):
# model = Comercializacion
# extra = 1
# max_num = 3
#
# class InlineCacaoComercializado(admin.TabularInline):
# model = CacaoComercializado
# max_num = 1
# can_delete = False
class InlineCertificacionOrg(admin.TabularInline):
model = CertificacionOrg
max_num = 1
can_delete = False
class InlineDestinoProdCorriente(admin.TabularInline):
model = DestinoProdCorriente
extra = 1
max_num = 4
class InlineDestinoProdFermentado(admin.TabularInline):
model = DestinoProdFermentado
extra = 1
max_num = 4
class InlineFinanciamiento(admin.TabularInline):
model = Financiamiento
max_num = 1
can_delete = False
class InlineFinanciamientoProductores(admin.TabularInline):
model = FinanciamientoProductores
extra = 1
max_num = 5
class InlineInfoFinanciamiento(admin.TabularInline):
model = InfoFinanciamiento
extra = 1
max_num = 4
class EncuestaOrganicacionAdmin(admin.ModelAdmin):
# def get_queryset(self, request):
# if request.user.is_superuser:
# return EncuestaOrganicacion.objects.all()
# return EncuestaOrganicacion.objects.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
obj.usuario = request.user
obj.save()
inlines = [InlineAspectosJuridicos,InlineListaMiembros,InlineDocumentacion,
InlineNivelCumplimiento,InlineProduccionComercializacion,
InlineInfraestructura,InlineTransporte,
InlineCertificacionOrg,InlineDestinoProdCorriente,InlineDestinoProdFermentado,
InlineFinanciamiento,InlineFinanciamientoProductores,InlineInfoFinanciamiento]
list_display = ('id','organizacion','fecha')
list_display_links = ('id','organizacion')
class Media:
css = {
'all': ('css/admin.css',)
}
js = ('js/admin_org.js',)
admin.site.register(Organizacion,OrganizacionAdmin)
admin.site.register(EncuestaOrganicacion,EncuestaOrganicacionAdmin)
| mit | -7,994,590,496,220,483,000 | 26.648 | 94 | 0.712095 | false |
srio/shadow3-scripts | transfocator_id30b.py | 1 | 25823 | import numpy
import xraylib
"""
transfocator_id30b : transfocator for id30b:
It can:
1) guess the lens configuration (number of lenses for each type) for a given photon energy
and target image size. Use transfocator_compute_configuration() for this task
2) for a given transfocator configuration, compute the main optical parameters
(image size, focal distance, focal position and divergence).
Use transfocator_compute_parameters() for this task
3) Performs full ray tracing. Use id30b_ray_tracing() for this task
Note that for the optimization and parameters calculations the transfocator configuration is
given in keywords. For ray tracing calculations many parameters of the transfocator are hard coded
with the values of id30b
See main program for examples.
Dependencies:
Numpy
xraylib (to compute refraction indices)
Shadow (for ray tracing only)
matplotlib (for some plots of ray tracing)
Side effects:
When running ray tracing some files are created.
MODIFICATION HISTORY:
2015-03-25 srio@esrf.eu, written
"""
__author__ = "Manuel Sanchez del Rio"
__contact__ = "srio@esrf.eu"
__copyright__ = "ESRF, 2015"
def transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=1 ):
"""
Computes the optimum transfocator configuration for a given photon energy and target image size.
All length units are cm
:param photon_energy_ev: the photon energy in eV
:param s_target: the target image size in cm.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenses for each type of lens. nlenses_max = [15,3,1]
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:param verbose: set to 1 for verbose text output
:return: a list with the number of lenses of each type.
"""
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return None
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \
lens_diameter=lens_diameter,method=2)
focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p)
div_q_target = alpha * lens_diameter / focal_q_target
#corrections for extreme cases
source_demagnified = 2.35*sigmaz*focal_q_target/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2)
nlenses_target = _transfocator_guess_configuration(focal_f_target,deltas=deltas,\
nlenses_max=nlenses_max,radii=nlenses_radii, )
if verbose:
print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target))
print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target))
print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4))
print("transfocator_compute_configuration: nlenses_target: ",nlenses_target)
return nlenses_target
def transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 ):
"""
Computes the parameters of the optical performance of a given transfocator configuration.
All length units are cm
:param photon_energy_ev:
:param nlenses_target: a list with the lens configuration, i.e. the number of lenses of each type.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenses for each type of lens. nlenses_max = [15,3,1]
TODO: remove (not used)
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:return: a list with parameters (image_size, lens_focal_distance,
focal_position from transfocator center, divergence of beam after the transfocator)
"""
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_f = _transfocator_calculate_focal_distance( deltas=deltas,\
nlenses=nlenses_target,radii=nlenses_radii)
focal_q = 1.0 / (1.0/focal_f - 1.0/tf_p)
div_q = alpha * lens_diameter / focal_q
#corrections
source_demagnified = 2.35*sigmaz*focal_q/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target = numpy.sqrt( (div_q*(tf_q-focal_q))**2 + (source_demagnified)**2 )
return (s_target,focal_f,focal_q,div_q)
def transfocator_nlenses_to_slots(nlenses,nlenses_max=None):
"""
converts the transfocator configuration from a list of the number of lenses of each type,
into a list of active (1) or inactive (0) actuators for the slots.
:param nlenses: the list with number of lenses (e.g., [5,2,0])
:param nlenses_max: the maximum number of lenses of each type, usually powers of two minus one.
E.g. [15,3,1]
:return: a list of on (1) and off (0) slots, e.g., [1, 0, 1, 0, 0, 1, 0]
(first type: 1*1+0*2+1*4+0*8=5, second type: 0*1+1*2=2, third type: 0*1=0)
"""
if nlenses_max == None:
nlenses_max = nlenses
ss = []
for i,iopt in enumerate(nlenses):
if iopt > nlenses_max[i]:
print("Error: i:%d, nlenses: %d, nlenses_max: %d"%(i,iopt,nlenses_max[i]))
ncharacters = len("{0:b}".format(nlenses_max[i]))
si = list( ("{0:0%db}"%(ncharacters)).format(int(iopt)) )
si.reverse()
ss += si
on_off = [int(i) for i in ss]
#print("transfocator_nlenses_to_slots: nlenses_max: ",nlenses_max," nlenses: ",nlenses," slots: ",on_off)
return on_off
def _transfocator_calculate_focal_distance(deltas=[0.999998],nlenses=[1],radii=[500e-4]):
inverse_focal_distance = 0.0
for i,nlensesi in enumerate(nlenses):
if nlensesi > 0:
focal_distance_i = radii[i] / (2.*nlensesi*deltas[i])
inverse_focal_distance += 1.0/focal_distance_i
if inverse_focal_distance == 0:
return 99999999999999999999999999.
else:
return 1.0/inverse_focal_distance
def _tansfocator_guess_focal_position( s_target, p=5960., q=3800.0, sigmaz=6.46e-4, \
alpha=0.66, lens_diameter=0.05, method=2):
x = 1e15
if method == 1: # simple sum
AA = 2.35*sigmaz/p
BB = -(s_target + alpha * lens_diameter)
CC = alpha*lens_diameter*q
cc = numpy.roots([AA,BB,CC])
x = cc[1]
return x
if method == 2: # sum in quadrature
AA = ( (2.35*sigmaz)**2)/(p**2)
BB = 0.0
CC = alpha**2 * lens_diameter**2 - s_target**2
DD = - 2.0 * alpha**2 * lens_diameter**2 * q
EE = alpha**2 * lens_diameter**2 * q**2
cc = numpy.roots([AA,BB,CC,DD,EE])
for i,cci in enumerate(cc):
if numpy.imag(cci) == 0:
return numpy.real(cci)
return x
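# Editor's note (added): method=2 above solves, via the real root of the quartic, the condition
#   s_target**2 = (2.35*sigmaz*x/p)**2 + (alpha*lens_diameter*(q-x)/x)**2
# for the focal position x, i.e. the demagnified source size and the divergence-driven blur
# added in quadrature, while method=1 uses the plain (non-quadrature) sum of the two terms.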
def _transfocator_guess_configuration(focal_f_target,deltas=[0.999998],nlenses_max=[15],radii=[500e-4]):
nn = len(nlenses_max)
ncombinations = (1+nlenses_max[0]) * (1+nlenses_max[1]) * (1+nlenses_max[2])
icombinations = 0
aa = numpy.zeros((3,ncombinations),dtype=int)
bb = numpy.zeros(ncombinations)
for i0 in range(1+nlenses_max[0]):
for i1 in range(1+nlenses_max[1]):
for i2 in range(1+nlenses_max[2]):
aa[0,icombinations] = i0
aa[1,icombinations] = i1
aa[2,icombinations] = i2
bb[icombinations] = focal_f_target - _transfocator_calculate_focal_distance(deltas=deltas,nlenses=[i0,i1,i2],radii=radii)
icombinations += 1
bb1 = numpy.abs(bb)
ibest = bb1.argmin()
return (aa[:,ibest]).tolist()
#
#
#
def id30b_ray_tracing(emittH=4e-9,emittV=1e-11,betaH=35.6,betaV=3.0,number_of_rays=50000,\
density=1.845,symbol="Be",tf_p=1000.0,tf_q=1000.0,lens_diameter=0.05,\
slots_max=None,slots_on_off=None,photon_energy_ev=14000.0,\
slots_lens_thickness=None,slots_steps=None,slots_radii=None,\
s_target=10e-4,focal_f=10.0,focal_q=10.0,div_q=1e-6):
#=======================================================================================================================
# Gaussian undulator source
#=======================================================================================================================
import Shadow
#import Shadow.ShadowPreprocessorsXraylib as sx
sigmaXp = numpy.sqrt(emittH/betaH)
sigmaZp = numpy.sqrt(emittV/betaV)
sigmaX = emittH/sigmaXp
sigmaZ = emittV/sigmaZp
print("\n\nElectron sizes H:%f um, V:%fu m;\nelectron divergences: H:%f urad, V:%f urad"%\
(sigmaX*1e6, sigmaZ*1e6, sigmaXp*1e6, sigmaZp*1e6))
# set Gaussian source
src = Shadow.Source()
src.set_energy_monochromatic(photon_energy_ev)
src.set_gauss(sigmaX*1e2,sigmaZ*1e2,sigmaXp,sigmaZp)
print("\n\nElectron sizes stored H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.apply_gaussian_undulator(undulator_length_in_m=2.8, user_unit_to_m=1e-2, verbose=1)
print("\n\nElectron sizes stored (undulator) H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
print("\n\nSource size in vertical FWHM: %f um\n"%\
(2.35*src.SIGMAZ*1e4))
src.NPOINT = number_of_rays
src.ISTAR1 = 0 # 677543155
src.write("start.00")
# create source
beam = Shadow.Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
#=======================================================================================================================
# complete the (detailed) transfocator description
#=======================================================================================================================
print("\nSetting detailed Transfocator for ID30B")
slots_nlenses = numpy.array(slots_max)*numpy.array(slots_on_off)
slots_empty = (numpy.array(slots_max)-slots_nlenses)
#
####interactive=True, SYMBOL="SiC",DENSITY=3.217,FILE="prerefl.dat",E_MIN=100.0,E_MAX=20000.0,E_STEP=100.0
Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=False,E_MIN=2000.0,E_MAX=55000.0,E_STEP=100.0,\
DENSITY=density,SYMBOL=symbol,FILE="Be2_55.dat" )
nslots = len(slots_max)
prerefl_file = ["Be2_55.dat" for i in range(nslots)]
print("slots_max: ",slots_max)
#print("slots_target: ",slots_target)
print("slots_on_off: ",slots_on_off)
print("slots_steps: ",slots_steps)
print("slots_radii: ",slots_radii)
print("slots_nlenses: ",slots_nlenses)
print("slots_empty: ",slots_empty)
#calculate distances, nlenses and slots_empty
# these are distances p and q with TF length removed
tf_length = numpy.array(slots_steps).sum() #tf length in cm
tf_fs_before = tf_p - 0.5*tf_length #distance from source to center of transfocator
tf_fs_after = tf_q - 0.5*tf_length # distance from center of transfocator to image
# for each slot, these are the empty distances before and after the lenses
tf_p0 = numpy.zeros(nslots)
tf_q0 = numpy.array(slots_steps) - (numpy.array(slots_max) * slots_lens_thickness)
# add now the p q distances
tf_p0[0] += tf_fs_before
tf_q0[-1] += tf_fs_after
print("tf_p0: ",tf_p0)
print("tf_q0: ",tf_q0)
print("tf_length: %f cm"%(tf_length))
# build transfocator
tf = Shadow.CompoundOE(name='TF ID30B')
tf.append_transfocator(tf_p0.tolist(), tf_q0.tolist(), \
nlenses=slots_nlenses.tolist(), radius=slots_radii, slots_empty=slots_empty.tolist(),\
thickness=slots_lens_thickness, prerefl_file=prerefl_file,\
surface_shape=4, convex_to_the_beam=0, diameter=lens_diameter,\
cylinder_angle=0.0,interthickness=50e-4,use_ccc=0)
itmp = input("SHADOW Source complete. Do you want to run SHADOR trace? [1=Yes,0=No]: ")
if str(itmp) != "1":
return
#trace system
tf.dump_systemfile()
beam.traceCompoundOE(tf,write_start_files=0,write_end_files=0,write_star_files=0, write_mirr_files=0)
#write only last result file
beam.write("star_tf.dat")
print("\nFile written to disk: star_tf.dat")
#
# #ideal calculations
#
print("\n\n\n")
print("=============================================== TRANSFOCATOR OUTPUTS ==========================================")
print("\nTHEORETICAL results: ")
print("REMIND-----With these lenses we obtained (analytically): ")
print("REMIND----- focal_f: %f cm"%(focal_f))
print("REMIND----- focal_q: %f cm"%(focal_q))
print("REMIND----- s_target: %f um"%(s_target*1e4))
demagnification_factor = tf_p/focal_q
theoretical_focal_size = src.SIGMAZ*2.35/demagnification_factor
# analyze shadow results
print("\nSHADOW results: ")
st1 = beam.get_standard_deviation(3,ref=0)
st2 = beam.get_standard_deviation(3,ref=1)
print(" stDev*2.35: unweighted: %f um, weighted: %f um "%(st1*2.35*1e4,st2*2.35*1e4))
tk = beam.histo1(3, nbins=75, ref=1, nolost=1, write="HISTO1")
print(" Histogram FWHM: %f um "%(1e4*tk["fwhm"]))
print(" Transmitted intensity: %f (source was: %d) (transmission is %f %%) "%(beam.intensity(nolost=1), src.NPOINT, beam.intensity(nolost=1)/src.NPOINT*100))
#scan around image
xx1 = numpy.linspace(0.0,1.1*tf_fs_after,11) # position from TF exit plane
#xx0 = focal_q - tf_length*0.5
xx0 = focal_q - tf_length*0.5 # position of focus from TF exit plane
xx2 = numpy.linspace(xx0-100.0,xx0+100,21) # position from TF exit plane
xx3 = numpy.array([tf_fs_after])
xx = numpy.concatenate(([-0.5*tf_length],xx1,xx2,[tf_fs_after]))
xx.sort()
f = open("id30b.spec","w")
f.write("#F id30b.spec\n")
f.write("\n#S 1 calculations for id30b transfocator\n")
f.write("#N 8\n")
labels = " %18s %18s %18s %18s %18s %18s %18s %18s"%\
("pos from source","pos from image","[pos from TF]", "pos from TF center", "pos from focus",\
"fwhm shadow(stdev)","fwhm shadow(histo)","fwhm theoretical")
f.write("#L "+labels+"\n")
out = numpy.zeros((8,xx.size))
for i,pos in enumerate(xx):
beam2 = beam.duplicate()
beam2.retrace(-tf_fs_after+pos)
fwhm1 = 2.35*1e4*beam2.get_standard_deviation(3,ref=1,nolost=1)
tk = beam2.histo1(3, nbins=75, ref=1, nolost=1)
fwhm2 = 1e4*tk["fwhm"]
#fwhm_th = 1e4*transfocator_calculate_estimated_size(pos,diameter=diameter,focal_distance=focal_q)
fwhm_th2 = 1e4*numpy.sqrt( (div_q*(pos+0.5*tf_length-focal_q))**2 + theoretical_focal_size**2 )
#fwhm_th2 = 1e4*( numpy.abs(div_q*(pos-focal_q+0.5*tf_length)) + theoretical_focal_size )
out[0,i] = tf_fs_before+tf_length+pos
out[1,i] = -tf_fs_after+pos
out[2,i] = pos
out[3,i] = pos+0.5*tf_length
out[4,i] = pos+0.5*tf_length-focal_q
out[5,i] = fwhm1
out[6,i] = fwhm2
out[7,i] = fwhm_th2
f.write(" %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f \n"%\
(tf_fs_before+tf_length+pos,\
-tf_fs_after+pos,\
pos,\
pos+0.5*tf_length,\
pos+0.5*tf_length-focal_q,\
fwhm1,fwhm2,fwhm_th2))
f.close()
print("File with beam evolution written to disk: id30b.spec")
#
# plots
#
itmp = input("Do you want to plot the intensity distribution and beam evolution? [1=yes,0=No]")
if str(itmp) != "1":
return
import matplotlib.pylab as plt
plt.figure(1)
plt.plot(out[1,:],out[5,:],'blue',label="fwhm shadow(stdev)")
plt.plot(out[1,:],out[6,:],'green',label="fwhm shadow(histo1)")
plt.plot(out[1,:],out[7,:],'red',label="fwhm theoretical")
plt.xlabel("Distance from image plane [cm]")
plt.ylabel("spot size [um] ")
ax = plt.subplot(111)
ax.legend(bbox_to_anchor=(1.1, 1.05))
print("Kill graphic to continue.")
plt.show()
Shadow.ShadowTools.histo1(beam,3,nbins=75,ref=1,nolost=1,calfwhm=1)
input("<Enter> to finish.")
return None
def id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=None):
if nlenses_target == None:
force_nlenses = 0
else:
force_nlenses = 1
#
# define lens setup (general)
#
xrl_symbol = ["Be","Be","Be"]
xrl_density = [1.845,1.845,1.845]
lens_diameter = 0.05
nlenses_max = [15,3,1]
nlenses_radii = [500e-4,1000e-4,1500e-4]
sigmaz=6.46e-4
alpha = 0.55
tf_p = 5960 # position of the center of the transfocator measured from the source
tf_q = 9760 - tf_p # position of the image plane measured from the center of the transfocator
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return
print("================================== TRANSFOCATOR INPUTS ")
print("Photon energy: %f eV"%(photon_energy_ev))
if force_nlenses:
print("Forced_nlenses: ",nlenses_target)
else:
print("target size: %f cm"%(s_target))
print("materials: ",xrl_symbol)
print("densities: ",xrl_density)
print("Lens diameter: %f cm"%(lens_diameter))
print("nlenses_max:",nlenses_max,"nlenses_radii: ",nlenses_radii)
print("Source size (sigma): %f um, FWHM: %f um"%(1e4*sigmaz,2.35*1e4*sigmaz))
print("Distances: tf_p: %f cm, tf_q: %f cm"%(tf_p,tf_q))
print("alpha: %f"%(alpha))
print("========================================================")
if force_nlenses != 1:
nlenses_target = transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, lens_diameter=lens_diameter, \
sigmaz=sigmaz, alpha=alpha, \
tf_p=tf_p,tf_q=tf_q, verbose=1)
(s_target,focal_f,focal_q,div_q) = \
transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, \
lens_diameter=lens_diameter,\
sigmaz=sigmaz, alpha=alpha,\
tf_p=tf_p,tf_q=tf_q)
slots_max = [ 1, 2, 4, 8, 1, 2, 1] # slots
slots_on_off = transfocator_nlenses_to_slots(nlenses_target,nlenses_max=nlenses_max)
print("=============================== TRANSFOCATOR SET")
#print("deltas: ",deltas)
if force_nlenses != 1:
print("nlenses_target (optimized): ",nlenses_target)
else:
print("nlenses_target (forced): ",nlenses_target)
print("With these lenses we obtain: ")
print(" focal_f: %f cm"%(focal_f))
print(" focal_q: %f cm"%(focal_q))
print(" s_target: %f um"%(s_target*1e4))
print(" slots_max: ",slots_max)
print(" slots_on_off: ",slots_on_off)
print("==================================================")
# for theoretical calculations use the focal position and distances given by the target nlenses
itmp = input("Start SHADOW simulation? [1=yes,0=No]: ")
if str(itmp) != "1":
return
#=======================================================================================================================
# Inputs
#=======================================================================================================================
emittH = 3.9e-9
emittV = 10e-12
betaH = 35.6
betaV = 3.0
number_of_rays = 50000
nslots = len(slots_max)
slots_lens_thickness = [0.3 for i in range(nslots)] #total thickness of a single lens in cm
# for each slot, positional gap of the first lens in cm
slots_steps = [ 4, 4, 1.9, 6.1, 4, 4, slots_lens_thickness[-1]]
slots_radii = [.05, .05, .05, .05, 0.1, 0.1, 0.15] # radii of the lenses in cm
AAA= 333
id30b_ray_tracing(emittH=emittH,emittV=emittV,betaH=betaH,betaV=betaV,number_of_rays=number_of_rays,\
density=xrl_density[0],symbol=xrl_symbol[0],tf_p=tf_p,tf_q=tf_q,lens_diameter=lens_diameter,\
slots_max=slots_max,slots_on_off=slots_on_off,photon_energy_ev=photon_energy_ev,\
slots_lens_thickness=slots_lens_thickness,slots_steps=slots_steps,slots_radii=slots_radii,\
s_target=s_target,focal_f=focal_f,focal_q=focal_q,div_q=div_q)
def main():
# this performs the full simulation: calculates the optimum configuration and does the ray tracing
itmp = input("Enter: \n 0 = optimization calculation only \n 1 = full simulation (ray tracing) \n?> ")
photon_energy_kev = float(input("Enter photon energy in keV: "))
s_target_um = float(input("Enter target focal dimension in microns: "))
if str(itmp) == "1":
id30b_full_simulation(photon_energy_ev=photon_energy_kev*1e3,s_target=s_target_um*1e-4,nlenses_target=None)
#id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=[3,1,1])
else:
#this performs the calculation of the optimized configuration
nlenses_optimum = transfocator_compute_configuration(photon_energy_kev*1e3,s_target_um*1e-4,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=0 )
print("Optimum lens configuration is: ",nlenses_optimum)
if nlenses_optimum == None:
return
print("Activate slots: ",transfocator_nlenses_to_slots(nlenses_optimum,nlenses_max=[15,3,1]))
# this calculates the parameters (image size, etc) for a given lens configuration
(size, f, q_f, div) = transfocator_compute_parameters(photon_energy_kev*1e3, nlenses_optimum,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 )
print("For given configuration ",nlenses_optimum," we get: ")
print(" size: %f cm, focal length: %f cm, focal distance: %f cm, divergence: %f rad: "%(size, f, q_f, div))
if __name__ == "__main__":
main() | mit | 5,677,322,547,994,774,000 | 39.224299 | 162 | 0.584905 | false |
jose187/gh_word_count | gh_word_count/__init__.py | 1 | 2681 |
from ommit_words import list_ommited_words
from re import sub
import operator
class _input_list:
def __init__(self,list_TITLES):
self.list_TITLES = list_TITLES
self.list_remove = list_ommited_words()
def _word_count(self):
# these are all the words that are in the text
dict_words = {}
# now we go through each of the lines
for str_line in self.list_TITLES:
str_raw_line1 = sub('[^a-z0-9 ]','',str_line.lower())
list_line_words = str_raw_line1.split()
for str_word in list_line_words:
# check to see if it's in the omitted word list
if str_word not in self.list_remove:
# create new key if it is not there yet
if str_word not in dict_words:
dict_words[str_word] = [1]
# add if is already there
elif str_word in dict_words:
dict_words[str_word].append(1)
sorted_x = sorted(dict_words.iteritems(),
key=operator.itemgetter(1),
reverse=True)
list_OUTPUT = []
for each_item in sorted_x:
int_COUNT = sum(each_item[1])
if int_COUNT > 1:
tup_ONE_COUNT = ('%s' % each_item[0],
'%d' % int_COUNT)
list_OUTPUT.append(tup_ONE_COUNT)
return list_OUTPUT
# gets the top x according to frequency
# returns list
def _get_top(self,int_TOP):
list_TOP_N = []
for str_WORD in self._word_count()[:int_TOP]:
list_TOP_N.append(str_WORD)
return list_TOP_N
# displays the count on the terminal
def _show_count(list_TUPS,entries=0):
if entries == 0:
int_TOP = len(list_TUPS)
else:
int_TOP = entries
print 'Count\tWord\n'
for tup_ITEM in list_TUPS[:int_TOP]:
print '%d\t%s' % (int(tup_ITEM[1]),str(tup_ITEM[0]))
# saves the count to csv file
def _save_counts(list_COUNTS,str_FILE_PATH,entries=0):
if entries == 0:
int_TOP = len(list_COUNTS)
else:
int_TOP = entries
list_OUTPUT = ['"Count","Word"']
for tup_ITEM in list_COUNTS[:int_TOP]:
str_OUTPUT = '%d,"%s"' % (int(tup_ITEM[1]),str(tup_ITEM[0]))
list_OUTPUT.append(str_OUTPUT)
fw_OUTPUT = open(str_FILE_PATH,'w')
fw_OUTPUT.write('\n'.join(list_OUTPUT))
fw_OUTPUT.close()
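# Editor's note: illustrative usage sketch (added; assumes ommit_words.list_ommited_words()
# is importable and uses the Python 2 print statements, matching this module):
#
# counts = _input_list(["First repo title", "Second title about python"])._word_count()
# _show_count(counts, entries=10) # print the ten most frequent words
# _save_counts(counts, "counts.csv") # write all counts to a CSV file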
| bsd-2-clause | -7,451,965,439,748,880,000 | 30.916667 | 69 | 0.497576 | false |
CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleDisableOpenSafeSafari.py | 1 | 4752 | ###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule DisableOpenSafeSafari
Created on Jan 22, 2015
@author: dwalker
@change: 2015-02-25 - ekkehard - Updated to make unit test work
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.DisableOpenSafeSafari import DisableOpenSafeSafari
class zzzTestRuleDisableOpenSafeSafari(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = DisableOpenSafeSafari(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
self.dc = "/usr/bin/defaults"
self.path = "com.apple.Safari"
self.key = "AutoOpenSafeDownloads"
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
'''This makes sure the initial report fails by executing the following
commands:
defaults write com.apple.Safari AutoOpenSafeDownloads -bool yes
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: dwalker
'''
success = False
cmd = [self.dc, "write", self.path, self.key, "-bool", "yes"]
self.logdispatch.log(LogPriority.DEBUG, str(cmd))
if self.ch.executeCommand(cmd):
success = self.checkReportForRule(False, True)
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''To see what happened run these commands:
defaults read com.apple.Safari AutoOpenSafeDownloads
:param self: essential if you override this definition
:param pCompliance:
:param pRuleSuccess:
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
success = True
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
cmd = [self.dc, "read", self.path, self.key]
self.logdispatch.log(LogPriority.DEBUG, str(cmd))
if self.ch.executeCommand(cmd):
output = self.ch.getOutputString()
return success
def checkFixForRule(self, pRuleSuccess):
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = self.checkReportForRule(True, pRuleSuccess)
return success
def checkUndoForRule(self, pRuleSuccess):
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = self.checkReportForRule(False, pRuleSuccess)
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 | 6,575,902,741,037,396,000 | 42.2 | 82 | 0.580177 | false |
HomeRad/TorCleaner | wc/filter/rules/FolderRule.py | 1 | 3945 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2009 Bastian Kleineidam
"""
Group filter rules into folders.
"""
from ... import fileutil, configuration
from . import Rule
def recalc_up_down(rules):
"""
Add .up and .down attributes to rules, used to display up/down
arrows in GUIs
"""
upper = len(rules)-1
for i, rule in enumerate(rules):
rule.up = (i>0)
rule.down = (i<upper)
class FolderRule(Rule.Rule):
"""
Container for a list of rules.
"""
def __init__(self, sid=None, titles=None, descriptions=None,
disable=0, filename=""):
"""
Initialize rule data.
"""
super(FolderRule, self).__init__(sid=sid, titles=titles,
descriptions=descriptions, disable=disable)
# make filename read-only
self._filename = filename
self.rules = []
self.attrnames.extend(('oid', 'configversion'))
self.intattrs.append('oid')
self.oid = None
self.configversion = "-"
def __str__(self):
"""
Return rule data as string.
"""
return super(FolderRule, self).__str__() + \
("\nrules: %d" % len(self.rules))
def filename_get(self):
"""
Get filename where this folder is stored.
"""
return self._filename
filename = property(filename_get)
def append_rule(self, r):
"""
Append rule to folder.
"""
r.oid = len(self.rules)
# note: the rules are added in order
self.rules.append(r)
r.parent = self
def delete_rule(self, i):
"""
Delete rule from folder with index i.
"""
del self.rules[i]
recalc_up_down(self.rules)
def update(self, rule, dryrun=False, log=None):
"""
Update this folder with given folder rule data.
"""
chg = super(FolderRule, self).update(rule, dryrun=dryrun, log=log)
for child in rule.rules:
if child.sid is None or not child.sid.startswith("wc"):
# ignore local rules
continue
oldrule = self.get_rule(child.sid)
if oldrule is not None:
if oldrule.update(child, dryrun=dryrun, log=log):
chg = True
else:
print >> log, _("inserting new rule %s") % \
child.tiptext()
if not dryrun:
self.rules.append(child)
chg = True
if chg:
recalc_up_down(self.rules)
return chg
def get_rule(self, sid):
"""
Return rule with given sid or None if not found.
"""
for rule in self.rules:
if rule.sid == sid:
return rule
return None
def toxml(self):
"""
Rule data as XML for storing.
"""
s = u"""<?xml version="1.0" encoding="%s"?>
<!DOCTYPE folder SYSTEM "filter.dtd">
%s oid="%d" configversion="%s">""" % \
(configuration.ConfigCharset, super(FolderRule, self).toxml(),
self.oid, self.configversion)
s += u"\n"+self.title_desc_toxml()+u"\n"
for r in self.rules:
s += u"\n%s\n" % r.toxml()
return s+u"</folder>\n"
def write(self, fd=None):
"""
Write xml data into filename.
@raise: OSError if file could not be written.
"""
s = self.toxml().encode("iso-8859-1", "replace")
if fd is None:
fileutil.write_file(self.filename, s)
else:
fd.write(s)
def tiptext(self):
"""
Return short info for gui display.
"""
l = len(self.rules)
if l == 1:
text = _("with 1 rule")
else:
text = _("with %d rules") % l
return "%s %s" % (super(FolderRule, self).tiptext(), text)
| gpl-2.0 | 716,819,241,840,942,200 | 27.79562 | 78 | 0.509759 | false |
RAJSD2610/SDNopenflowSwitchAnalysis | TotalFlowPlot.py | 1 | 2742 | import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
path= os.path.expanduser("~/Desktop/ece671/udpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
u8=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/udpt8/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has an entry
if y<0:
y=0
u8.append(y)
i+=1
print(u8)
path= os.path.expanduser("~/Desktop/ece671/udpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/udpnone/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has an entry
if y<0:
y=0
u.append(y)
i+=1
print(u)
path= os.path.expanduser("~/Desktop/ece671/tcpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpnone/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has an entry
if y<0:
y=0
t.append(y)
i+=1
print(t)
path= os.path.expanduser("~/Desktop/ece671/tcpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpt8/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has an entry
if y<0:
y=0
t8.append(y)
i+=1
print(t8)
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u)+1)),u, '.-',label="udpnone")
plt.plot(list(range(1,len(t)+1)),t, '.-',label="tcpnone")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.title("Total Flows Present after 1st flow")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
| gpl-3.0 | 3,297,434,910,053,564,400 | 24.388889 | 86 | 0.591174 | false |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/io_scene_3ds/__init__.py | 1 | 6950 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "Autodesk 3DS format",
"author": "Bob Holcomb, Campbell Barton",
"blender": (2, 57, 0),
"location": "File > Import-Export",
"description": "Import-Export 3DS, meshes, uvs, materials, textures, "
"cameras & lamps",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Autodesk_3DS",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Import-Export"}
if "bpy" in locals():
import imp
if "import_3ds" in locals():
imp.reload(import_3ds)
if "export_3ds" in locals():
imp.reload(export_3ds)
import bpy
from bpy.props import StringProperty, FloatProperty, BoolProperty, EnumProperty
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
axis_conversion,
)
class Import3DS(bpy.types.Operator, ImportHelper):
"""Import from 3DS file format (.3ds)"""
bl_idname = "import_scene.autodesk_3ds"
bl_label = 'Import 3DS'
bl_options = {'UNDO'}
filename_ext = ".3ds"
filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})
constrain_size = FloatProperty(
name="Size Constraint",
description="Scale the model by 10 until it reaches the "
"size constraint (0 to disable)",
min=0.0, max=1000.0,
soft_min=0.0, soft_max=1000.0,
default=10.0,
)
use_image_search = BoolProperty(
name="Image Search",
description="Search subdirectories for any associated images "
"(Warning, may be slow)",
default=True,
)
use_apply_transform = BoolProperty(
name="Apply Transform",
description="Workaround for object transformations "
"importing incorrectly",
default=True,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
def execute(self, context):
from . import import_3ds
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
))
global_matrix = axis_conversion(from_forward=self.axis_forward,
from_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
return import_3ds.load(self, context, **keywords)
class Export3DS(bpy.types.Operator, ExportHelper):
"""Export to 3DS file format (.3ds)"""
bl_idname = "export_scene.autodesk_3ds"
bl_label = 'Export 3DS'
filename_ext = ".3ds"
filter_glob = StringProperty(
default="*.3ds",
options={'HIDDEN'},
)
use_selection = BoolProperty(
name="Selection Only",
description="Export selected objects only",
default=False,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
def execute(self, context):
from . import export_3ds
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
"check_existing",
))
global_matrix = axis_conversion(to_forward=self.axis_forward,
to_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
return export_3ds.save(self, context, **keywords)
# Add to a menu
def menu_func_export(self, context):
self.layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")
def menu_func_import(self, context):
self.layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_import)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
# NOTES:
# why add 1 extra vertex? and remove it when done? -
# "Answer - eekadoodle - would need to re-order UV's without this since face
# order isnt always what we give blender, BMesh will solve :D"
#
# disabled scaling to size, this requires exposing bb (easy) and understanding
# how it works (needs some time)
if __name__ == "__main__":
register()
| gpl-3.0 | -6,396,439,276,597,938,000 | 32.095238 | 79 | 0.496259 | false |
HyperloopTeam/FullOpenMDAO | cantera-2.0.2/interfaces/python/MixMaster/Units/unit.py | 1 | 2833 | import operator
# Exception types used by the operators below; the original module referenced
# these names without defining or importing them, so minimal definitions are
# provided here (the ImcompatibleUnits spelling follows the existing raise sites).
class ImcompatibleUnits(Exception):
    pass
class BadOperation(Exception):
    pass
class unit:
_zero = (0,) * 7
_negativeOne = (-1, ) * 7
_labels = ('m', 'kg', 's', 'A', 'K', 'mol', 'cd')
def __init__(self, value, derivation):
self.value = value
self.derivation = derivation
return
def __add__(self, other):
if not self.derivation == other.derivation:
raise ImcompatibleUnits(self, other)
return unit(self.value + other.value, self.derivation)
def __sub__(self, other):
if not self.derivation == other.derivation:
raise ImcompatibleUnits(self, other)
return unit(self.value - other.value, self.derivation)
def __mul__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
return unit(other*self.value, self.derivation)
value = self.value * other.value
derivation = tuple(map(operator.add, self.derivation, other.derivation))
return unit(value, derivation)
def __div__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
return unit(self.value/other, self.derivation)
value = self.value / other.value
derivation = tuple(map(operator.sub, self.derivation, other.derivation))
return unit(value, derivation)
def __pow__(self, other):
if type(other) != type(0) and type(other) != type(0.0):
raise BadOperation
value = self.value ** other
derivation = tuple(map(operator.mul, [other]*7, self.derivation))
return unit(value, derivation)
def __pos__(self): return self
def __neg__(self): return unit(-self.value, self.derivation)
def __abs__(self): return unit(abs(self.value), self.derivation)
def __invert__(self):
value = 1./self.value
derivation = tuple(map(operator.mul, self._negativeOne, self.derivation))
return unit(value, derivation)
def __rmul__(self, other):
return unit.__mul__(self, other)
def __rdiv__(self, other):
if type(other) != type(0) and type(other) != type(0.0):
raise BadOperation(self, other)
value = other/self.value
derivation = tuple(map(operator.mul, self._negativeOne, self.derivation))
return unit(value, derivation)
def __float__(self):
return self.value
#if self.derivation == self._zero: return self.value
#raise BadConversion(self)
def __str__(self):
str = "%g" % self.value
for i in range(0, 7):
exponent = self.derivation[i]
if exponent == 0: continue
if exponent == 1:
str = str + " %s" % (self._labels[i])
else:
str = str + " %s^%d" % (self._labels[i], exponent)
return str
dimensionless = unit(1, unit._zero)
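# Minimal usage sketch (illustrative, not part of the original module): a
# derivation is a 7-tuple of exponents over the base units named in _labels.
#   meter = unit(1.0, (1, 0, 0, 0, 0, 0, 0))
#   second = unit(1.0, (0, 0, 1, 0, 0, 0, 0))
#   speed = 5 * meter / second
#   print str(speed)      # -> "5 m s^-1"
#   speed + meter         # raises ImcompatibleUnits (dimensions differ)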
| gpl-2.0 | 3,539,097,702,608,451,000 | 25.476636 | 81 | 0.570773 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/windows_configuration_py3.py | 1 | 2719 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WindowsConfiguration(Model):
"""Specifies Windows operating system settings on the virtual machine.
:param provision_vm_agent: Indicates whether virtual machine agent should
be provisioned on the virtual machine. <br><br> When this property is not
specified in the request body, default behavior is to set it to true.
This will ensure that VM Agent is installed on the VM so that extensions
can be added to the VM later.
:type provision_vm_agent: bool
:param enable_automatic_updates: Indicates whether virtual machine is
enabled for automatic updates.
:type enable_automatic_updates: bool
:param time_zone: Specifies the time zone of the virtual machine. e.g.
"Pacific Standard Time"
:type time_zone: str
:param additional_unattend_content: Specifies additional base-64 encoded
XML formatted information that can be included in the Unattend.xml file,
which is used by Windows Setup.
:type additional_unattend_content:
list[~azure.mgmt.compute.v2016_03_30.models.AdditionalUnattendContent]
:param win_rm: Specifies the Windows Remote Management listeners. This
enables remote Windows PowerShell.
:type win_rm: ~azure.mgmt.compute.v2016_03_30.models.WinRMConfiguration
"""
_attribute_map = {
'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
}
def __init__(self, *, provision_vm_agent: bool=None, enable_automatic_updates: bool=None, time_zone: str=None, additional_unattend_content=None, win_rm=None, **kwargs) -> None:
super(WindowsConfiguration, self).__init__(**kwargs)
self.provision_vm_agent = provision_vm_agent
self.enable_automatic_updates = enable_automatic_updates
self.time_zone = time_zone
self.additional_unattend_content = additional_unattend_content
self.win_rm = win_rm
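# A minimal construction sketch (values are illustrative, not from the original
# file); parameters left unspecified simply stay None:
#   win_config = WindowsConfiguration(
#       provision_vm_agent=True,
#       enable_automatic_updates=True,
#       time_zone='Pacific Standard Time',
#   )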
| mit | 7,741,615,214,177,697,000 | 49.351852 | 180 | 0.670835 | false |
matiboy/django_safari_notifications | django_safari_notifications/apps.py | 1 | 1111 | # -*- coding: utf-8
from django.apps import AppConfig
import logging
class DjangoSafariNotificationsConfig(AppConfig):
name = 'django_safari_notifications'
verbose_name = 'Safari Push Notifications'
version = 'v1'
service_base = 'push'
userinfo_key = 'userinfo'
logger = logging.getLogger('django_safari_notifications')
# Provide path to a pem file containing the certificate, the key as well as Apple's WWDRCA
cert = 'path/to/your/cert'
    passphrase = 'pass:xxxx' # passed to openssl via -passin, so any of its passphrase forms work (pass:, env:, file:, etc.)
# If single site, just set these values. Otherwise create Domain entries
website_conf = None
# sample single site: do not include the authenticationToken
"""
website_conf = {
"websiteName": "Bay Airlines",
"websitePushID": "web.com.example.domain",
"allowedDomains": ["http://domain.example.com"],
"urlFormatString": "http://domain.example.com/%@/?flight=%@",
"webServiceURL": "https://example.com/push"
}
"""
iconset_folder = '/path/to/your/iconset'
| mit | -6,515,032,378,796,969,000 | 38.678571 | 115 | 0.673267 | false |
TomSkelly/MatchAnnot | showAnnot.py | 1 | 2299 | #!/usr/bin/env python
# Read annotation file, print selected stuff in human-readable format.
# AUTHOR: Tom Skelly (thomas.skelly@fnlcr.nih.gov)
import os
import sys
import optparse
import re # regular expressions
import cPickle as pickle
from tt_log import logger
import Annotations as anno
VERSION = '20150417.01'
def main ():
logger.debug('version %s starting' % VERSION)
opt, args = getParms()
if opt.gtfpickle is not None:
handle = open (opt.gtfpickle, 'r')
pk = pickle.Unpickler (handle)
annotList = pk.load()
handle.close()
else:
annotList = anno.AnnotationList (opt.gtf)
geneList = annotList.getGene (opt.gene)
if geneList is None:
print 'gene %s not found in annotations' % opt.gene
elif len(geneList) != 1:
print 'there are %d occurrences of gene %s in annotations' % (len(geneList), opt.gene)
else:
geneEnt = geneList[0]
print 'gene: ',
printEnt (geneEnt)
for transEnt in geneEnt.getChildren():
print '\ntr: ',
printTran (transEnt)
for exonEnt in transEnt.getChildren():
print 'exon: ',
printEnt (exonEnt)
logger.debug('finished')
return
def printEnt (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1)
return
def printTran (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1),
if hasattr (ent, 'startcodon'):
print ' start: %9d' % ent.startcodon,
if hasattr (ent, 'stopcodon'):
print ' stop: %9d' % ent.stopcodon,
print
return
def getParms (): # use default input sys.argv[1:]
parser = optparse.OptionParser(usage='%prog [options] <fasta_file> ... ')
parser.add_option ('--gtf', help='annotations in gtf format')
parser.add_option ('--gtfpickle', help='annotations in pickled gtf format')
parser.add_option ('--gene', help='gene to print')
parser.set_defaults (gtf=None,
gtfpickle=None,
gene=None,
)
opt, args = parser.parse_args()
return opt, args
if __name__ == "__main__":
main()
| gpl-3.0 | -1,754,153,406,302,757,400 | 24.263736 | 94 | 0.579382 | false |
Midrya/chromium | rietveld.py | 1 | 26054 | # coding: utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines class Rietveld to easily access a rietveld instance.
Security implications:
The following hypotheses are made:
- Rietveld enforces:
  - Nobody other than the issue owner can upload a patch set
  - The issue owner's credentials are verified when creating new issues
  - An issue owner cannot be changed once the issue is created
  - A patch set cannot be modified
"""
import copy
import errno
import json
import logging
import re
import socket
import ssl
import sys
import time
import urllib
import urllib2
import urlparse
import patch
from third_party import upload
import third_party.oauth2client.client as oa2client
from third_party import httplib2
# Appengine replies with 302 when authentication fails (sigh.)
oa2client.REFRESH_STATUS_CODES.append(302)
upload.LOGGER.setLevel(logging.WARNING) # pylint: disable=E1103
class Rietveld(object):
"""Accesses rietveld."""
def __init__(
self, url, auth_config, email=None, extra_headers=None, maxtries=None):
self.url = url.rstrip('/')
self.rpc_server = upload.GetRpcServer(self.url, auth_config, email)
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
def xsrf_token(self):
if (not self._xsrf_token_time or
(time.time() - self._xsrf_token_time) > 30*60):
self._xsrf_token_time = time.time()
self._xsrf_token = self.get(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
return self._xsrf_token
def get_pending_issues(self):
"""Returns an array of dict of all the pending issues on the server."""
# TODO: Convert this to use Rietveld::search(), defined below.
return json.loads(
self.get('/search?format=json&commit=2&closed=3&'
'keys_only=True&limit=1000&order=__key__'))['results']
def close_issue(self, issue):
"""Closes the Rietveld issue for this changelist."""
logging.info('closing issue %d' % issue)
self.post("/%d/close" % issue, [('xsrf_token', self.xsrf_token())])
def get_description(self, issue):
"""Returns the issue's description.
    Converts any CRLF into LF and strips extraneous whitespace.
"""
return '\n'.join(self.get('/%d/description' % issue).strip().splitlines())
def get_issue_properties(self, issue, messages):
"""Returns all the issue's metadata as a dictionary."""
url = '/api/%d' % issue
if messages:
url += '?messages=true'
data = json.loads(self.get(url, retry_on_404=True))
data['description'] = '\n'.join(data['description'].strip().splitlines())
return data
def get_depends_on_patchset(self, issue, patchset):
"""Returns the patchset this patchset depends on if it exists."""
url = '/%d/patchset/%d/get_depends_on_patchset' % (issue, patchset)
resp = None
try:
resp = json.loads(self.post(url, []))
except (urllib2.HTTPError, ValueError):
# The get_depends_on_patchset endpoint does not exist on this Rietveld
# instance yet. Ignore the error and proceed.
# TODO(rmistry): Make this an error when all Rietveld instances have
# this endpoint.
pass
return resp
def get_patchset_properties(self, issue, patchset):
"""Returns the patchset properties."""
url = '/api/%d/%d' % (issue, patchset)
return json.loads(self.get(url))
def get_file_content(self, issue, patchset, item):
"""Returns the content of a new file.
Throws HTTP 302 exception if the file doesn't exist or is not a binary file.
"""
# content = 0 is the old file, 1 is the new file.
content = 1
url = '/%d/binary/%d/%d/%d' % (issue, patchset, item, content)
return self.get(url)
def get_file_diff(self, issue, patchset, item):
"""Returns the diff of the file.
Returns a useless diff for binary files.
"""
url = '/download/issue%d_%d_%d.diff' % (issue, patchset, item)
return self.get(url)
def get_patch(self, issue, patchset):
"""Returns a PatchSet object containing the details to apply this patch."""
props = self.get_patchset_properties(issue, patchset) or {}
out = []
for filename, state in props.get('files', {}).iteritems():
logging.debug('%s' % filename)
# If not status, just assume it's a 'M'. Rietveld often gets it wrong and
# just has status: null. Oh well.
status = state.get('status') or 'M'
if status[0] not in ('A', 'D', 'M', 'R'):
raise patch.UnsupportedPatchFormat(
filename, 'Change with status \'%s\' is not supported.' % status)
svn_props = self.parse_svn_properties(
state.get('property_changes', ''), filename)
if state.get('is_binary'):
if status[0] == 'D':
if status[0] != status.strip():
raise patch.UnsupportedPatchFormat(
filename, 'Deleted file shouldn\'t have property change.')
out.append(patch.FilePatchDelete(filename, state['is_binary']))
else:
content = self.get_file_content(issue, patchset, state['id'])
if not content:
# As a precaution due to a bug in upload.py for git checkout, refuse
# empty files. If it's empty, it's not a binary file.
raise patch.UnsupportedPatchFormat(
filename,
'Binary file is empty. Maybe the file wasn\'t uploaded in the '
'first place?')
out.append(patch.FilePatchBinary(
filename,
content,
svn_props,
is_new=(status[0] == 'A')))
continue
try:
diff = self.get_file_diff(issue, patchset, state['id'])
except urllib2.HTTPError, e:
if e.code == 404:
raise patch.UnsupportedPatchFormat(
filename, 'File doesn\'t have a diff.')
raise
# FilePatchDiff() will detect file deletion automatically.
p = patch.FilePatchDiff(filename, diff, svn_props)
out.append(p)
if status[0] == 'A':
# It won't be set for empty file.
p.is_new = True
if (len(status) > 1 and
status[1] == '+' and
not (p.source_filename or p.svn_properties)):
raise patch.UnsupportedPatchFormat(
filename, 'Failed to process the svn properties')
return patch.PatchSet(out)
@staticmethod
def parse_svn_properties(rietveld_svn_props, filename):
"""Returns a list of tuple [('property', 'newvalue')].
rietveld_svn_props is the exact format from 'svn diff'.
"""
rietveld_svn_props = rietveld_svn_props.splitlines()
svn_props = []
if not rietveld_svn_props:
return svn_props
# 1. Ignore svn:mergeinfo.
# 2. Accept svn:eol-style and svn:executable.
# 3. Refuse any other.
# \n
# Added: svn:ignore\n
# + LF\n
spacer = rietveld_svn_props.pop(0)
if spacer or not rietveld_svn_props:
      # svn diff always puts a spacer between the unified diff and property
# diff
raise patch.UnsupportedPatchFormat(
filename, 'Failed to parse svn properties.')
while rietveld_svn_props:
# Something like 'Added: svn:eol-style'. Note the action is localized.
# *sigh*.
action = rietveld_svn_props.pop(0)
match = re.match(r'^(\w+): (.+)$', action)
if not match or not rietveld_svn_props:
raise patch.UnsupportedPatchFormat(
filename,
'Failed to parse svn properties: %s, %s' % (action, svn_props))
if match.group(2) == 'svn:mergeinfo':
# Silently ignore the content.
rietveld_svn_props.pop(0)
continue
if match.group(1) not in ('Added', 'Modified'):
# Will fail for our French friends.
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property operation.')
if match.group(2) in ('svn:eol-style', 'svn:executable', 'svn:mime-type'):
# ' + foo' where foo is the new value. That's fragile.
content = rietveld_svn_props.pop(0)
match2 = re.match(r'^ \+ (.*)$', content)
if not match2:
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property format.')
svn_props.append((match.group(2), match2.group(1)))
return svn_props
def update_description(self, issue, description):
"""Sets the description for an issue on Rietveld."""
logging.info('new description for issue %d' % issue)
self.post('/%d/description' % issue, [
('description', description),
('xsrf_token', self.xsrf_token())])
def add_comment(self, issue, message, add_as_reviewer=False):
max_message = 10000
tail = '…\n(message too large)'
if len(message) > max_message:
message = message[:max_message-len(tail)] + tail
logging.info('issue %d; comment: %s' % (issue, message.strip()[:300]))
return self.post('/%d/publish' % issue, [
('xsrf_token', self.xsrf_token()),
('message', message),
('message_only', 'True'),
('add_as_reviewer', str(bool(add_as_reviewer))),
('send_mail', 'True'),
('no_redirect', 'True')])
def add_inline_comment(
self, issue, text, side, snapshot, patchset, patchid, lineno):
logging.info('add inline comment for issue %d' % issue)
return self.post('/inline_draft', [
('issue', str(issue)),
('text', text),
('side', side),
('snapshot', snapshot),
('patchset', str(patchset)),
('patch', str(patchid)),
('lineno', str(lineno))])
def set_flag(self, issue, patchset, flag, value):
return self.post('/%d/edit_flags' % issue, [
('last_patchset', str(patchset)),
('xsrf_token', self.xsrf_token()),
(flag, str(value))])
def search(
self,
owner=None, reviewer=None,
base=None,
closed=None, private=None, commit=None,
created_before=None, created_after=None,
modified_before=None, modified_after=None,
per_request=None, keys_only=False,
with_messages=False):
"""Yields search results."""
# These are expected to be strings.
string_keys = {
'owner': owner,
'reviewer': reviewer,
'base': base,
'created_before': created_before,
'created_after': created_after,
'modified_before': modified_before,
'modified_after': modified_after,
}
# These are either None, False or True.
three_state_keys = {
'closed': closed,
'private': private,
'commit': commit,
}
url = '/search?format=json'
# Sort the keys mainly to ease testing.
for key in sorted(string_keys):
value = string_keys[key]
if value:
url += '&%s=%s' % (key, urllib2.quote(value))
for key in sorted(three_state_keys):
value = three_state_keys[key]
if value is not None:
url += '&%s=%d' % (key, int(value) + 1)
if keys_only:
url += '&keys_only=True'
if with_messages:
url += '&with_messages=True'
if per_request:
url += '&limit=%d' % per_request
cursor = ''
while True:
output = self.get(url + cursor)
if output.startswith('<'):
# It's an error message. Return as no result.
break
data = json.loads(output) or {}
if not data.get('results'):
break
for i in data['results']:
yield i
cursor = '&cursor=%s' % data['cursor']
def trigger_try_jobs(
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
"""Requests new try jobs.
|builders_and_tests| is a map of builders: [tests] to run.
|master| is the name of the try master the builders belong to.
|category| is used to distinguish regular jobs and experimental jobs.
    Returns the keys of the new TryJobResult entities.
"""
params = [
('reason', reason),
('clobber', 'True' if clobber else 'False'),
('builders', json.dumps(builders_and_tests)),
('xsrf_token', self.xsrf_token()),
('category', category),
]
if revision:
params.append(('revision', revision))
if master:
# Temporarily allow empty master names for old configurations. The try
# job will not be associated with a master name on rietveld. This is
# going to be deprecated.
params.append(('master', master))
return self.post('/%d/try/%d' % (issue, patchset), params)
def trigger_distributed_try_jobs(
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
"""Requests new try jobs.
|masters| is a map of masters: map of builders: [tests] to run.
|category| is used to distinguish regular jobs and experimental jobs.
"""
for (master, builders_and_tests) in masters.iteritems():
self.trigger_try_jobs(
issue, patchset, reason, clobber, revision, builders_and_tests,
master, category)
def get_pending_try_jobs(self, cursor=None, limit=100):
"""Retrieves the try job requests in pending state.
Returns a tuple of the list of try jobs and the cursor for the next request.
"""
url = '/get_pending_try_patchsets?limit=%d' % limit
extra = ('&cursor=' + cursor) if cursor else ''
data = json.loads(self.get(url + extra))
return data['jobs'], data['cursor']
def get(self, request_path, **kwargs):
kwargs.setdefault('payload', None)
return self._send(request_path, **kwargs)
def post(self, request_path, data, **kwargs):
ctype, body = upload.EncodeMultipartFormData(data, [])
return self._send(request_path, payload=body, content_type=ctype, **kwargs)
def _send(self, request_path, retry_on_404=False, **kwargs):
"""Sends a POST/GET to Rietveld. Returns the response body."""
# rpc_server.Send() assumes timeout=None by default; make sure it's set
# to something reasonable.
kwargs.setdefault('timeout', 15)
logging.debug('POSTing to %s, args %s.', request_path, kwargs)
try:
# Sadly, upload.py calls ErrorExit() which does a sys.exit(1) on HTTP
# 500 in AbstractRpcServer.Send().
old_error_exit = upload.ErrorExit
def trap_http_500(msg):
"""Converts an incorrect ErrorExit() call into a HTTPError exception."""
m = re.search(r'(50\d) Server Error', msg)
if m:
# Fake an HTTPError exception. Cheezy. :(
raise urllib2.HTTPError(
request_path, int(m.group(1)), msg, None, None)
old_error_exit(msg)
upload.ErrorExit = trap_http_500
for retry in xrange(self._maxtries):
try:
logging.debug('%s' % request_path)
result = self.rpc_server.Send(request_path, **kwargs)
# Sometimes GAE returns a HTTP 200 but with HTTP 500 as the content.
# How nice.
return result
except urllib2.HTTPError, e:
if retry >= (self._maxtries - 1):
raise
flake_codes = [500, 502, 503]
if retry_on_404:
flake_codes.append(404)
if e.code not in flake_codes:
raise
except urllib2.URLError, e:
if retry >= (self._maxtries - 1):
raise
if (not 'Name or service not known' in e.reason and
not 'EOF occurred in violation of protocol' in e.reason and
# On windows we hit weird bug http://crbug.com/537417
# with message '[Errno 10060] A connection attempt failed...'
not (sys.platform.startswith('win') and
isinstance(e.reason, socket.error) and
e.reason.errno == errno.ETIMEDOUT
)
):
# Usually internal GAE flakiness.
raise
except ssl.SSLError, e:
if retry >= (self._maxtries - 1):
raise
if not 'timed out' in str(e):
raise
# If reaching this line, loop again. Uses a small backoff.
time.sleep(min(10, 1+retry*2))
except urllib2.HTTPError as e:
print 'Request to %s failed: %s' % (e.geturl(), e.read())
raise
finally:
upload.ErrorExit = old_error_exit
# DEPRECATED.
Send = get
class OAuthRpcServer(object):
def __init__(self,
host,
client_email,
client_private_key,
private_key_password='notasecret',
user_agent=None,
timeout=None,
extra_headers=None):
"""Wrapper around httplib2.Http() that handles authentication.
client_email: email associated with the service account
client_private_key: encrypted private key, as a string
private_key_password: password used to decrypt the private key
"""
# Enforce https
host_parts = urlparse.urlparse(host)
if host_parts.scheme == 'https': # fine
self.host = host
elif host_parts.scheme == 'http':
upload.logging.warning('Changing protocol to https')
self.host = 'https' + host[4:]
else:
msg = 'Invalid url provided: %s' % host
upload.logging.error(msg)
raise ValueError(msg)
self.host = self.host.rstrip('/')
self.extra_headers = extra_headers or {}
if not oa2client.HAS_OPENSSL:
logging.error("No support for OpenSSL has been found, "
"OAuth2 support requires it.")
logging.error("Installing pyopenssl will probably solve this issue.")
raise RuntimeError('No OpenSSL support')
self.creds = oa2client.SignedJwtAssertionCredentials(
client_email,
client_private_key,
'https://www.googleapis.com/auth/userinfo.email',
private_key_password=private_key_password,
user_agent=user_agent)
self._http = self.creds.authorize(httplib2.Http(timeout=timeout))
def Send(self,
request_path,
payload=None,
content_type='application/octet-stream',
timeout=None,
extra_headers=None,
**kwargs):
"""Send a POST or GET request to the server.
Args:
request_path: path on the server to hit. This is concatenated with the
value of 'host' provided to the constructor.
payload: request is a POST if not None, GET otherwise
timeout: in seconds
extra_headers: (dict)
"""
# This method signature should match upload.py:AbstractRpcServer.Send()
method = 'GET'
headers = self.extra_headers.copy()
headers.update(extra_headers or {})
if payload is not None:
method = 'POST'
headers['Content-Type'] = content_type
prev_timeout = self._http.timeout
try:
if timeout:
self._http.timeout = timeout
# TODO(pgervais) implement some kind of retry mechanism (see upload.py).
url = self.host + request_path
if kwargs:
url += "?" + urllib.urlencode(kwargs)
# This weird loop is there to detect when the OAuth2 token has expired.
# This is specific to appengine *and* rietveld. It relies on the
# assumption that a 302 is triggered only by an expired OAuth2 token. This
# prevents any usage of redirections in pages accessed this way.
# This variable is used to make sure the following loop runs only twice.
redirect_caught = False
while True:
try:
ret = self._http.request(url,
method=method,
body=payload,
headers=headers,
redirections=0)
except httplib2.RedirectLimit:
if redirect_caught or method != 'GET':
logging.error('Redirection detected after logging in. Giving up.')
raise
redirect_caught = True
logging.debug('Redirection detected. Trying to log in again...')
self.creds.access_token = None
continue
break
return ret[1]
finally:
self._http.timeout = prev_timeout
class JwtOAuth2Rietveld(Rietveld):
"""Access to Rietveld using OAuth authentication.
This class is supposed to be used only by bots, since this kind of
access is restricted to service accounts.
"""
  # The parent __init__ is not called on purpose.
# pylint: disable=W0231
def __init__(self,
url,
client_email,
client_private_key_file,
private_key_password=None,
extra_headers=None,
maxtries=None):
if private_key_password is None: # '' means 'empty password'
private_key_password = 'notasecret'
self.url = url.rstrip('/')
bot_url = self.url
if self.url.endswith('googleplex.com'):
bot_url = self.url + '/bots'
with open(client_private_key_file, 'rb') as f:
client_private_key = f.read()
logging.info('Using OAuth login: %s' % client_email)
self.rpc_server = OAuthRpcServer(bot_url,
client_email,
client_private_key,
private_key_password=private_key_password,
extra_headers=extra_headers or {})
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
class CachingRietveld(Rietveld):
"""Caches the common queries.
Not to be used in long-standing processes, like the commit queue.
"""
def __init__(self, *args, **kwargs):
super(CachingRietveld, self).__init__(*args, **kwargs)
self._cache = {}
def _lookup(self, function_name, args, update):
"""Caches the return values corresponding to the arguments.
It is important that the arguments are standardized, like None vs False.
"""
function_cache = self._cache.setdefault(function_name, {})
if args not in function_cache:
function_cache[args] = update(*args)
return copy.deepcopy(function_cache[args])
def get_description(self, issue):
return self._lookup(
'get_description',
(issue,),
super(CachingRietveld, self).get_description)
def get_issue_properties(self, issue, messages):
"""Returns the issue properties.
Because in practice the presubmit checks often ask without messages first
    and then with messages, always ask with messages and strip them off if they
    were not requested.
"""
# It's a tad slower to request with the message but it's better than
# requesting the properties twice.
data = self._lookup(
'get_issue_properties',
(issue, True),
super(CachingRietveld, self).get_issue_properties)
if not messages:
# Assumes self._lookup uses deepcopy.
del data['messages']
return data
def get_patchset_properties(self, issue, patchset):
return self._lookup(
'get_patchset_properties',
(issue, patchset),
super(CachingRietveld, self).get_patchset_properties)
class ReadOnlyRietveld(object):
"""
Only provides read operations, and simulates writes locally.
  Intentionally does not inherit from Rietveld, to avoid any write-issuing
  logic being invoked accidentally.
"""
# Dictionary of local changes, indexed by issue number as int.
_local_changes = {}
def __init__(self, *args, **kwargs):
# We still need an actual Rietveld instance to issue reads, just keep
# it hidden.
self._rietveld = Rietveld(*args, **kwargs)
@classmethod
def _get_local_changes(cls, issue):
"""Returns dictionary of local changes for |issue|, if any."""
return cls._local_changes.get(issue, {})
@property
def url(self):
return self._rietveld.url
def get_pending_issues(self):
pending_issues = self._rietveld.get_pending_issues()
    # Filter out issues we've closed or whose commit checkbox we've unchecked.
return [issue for issue in pending_issues
if not self._get_local_changes(issue).get('closed', False) and
self._get_local_changes(issue).get('commit', True)]
def close_issue(self, issue): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: closing issue %d' % issue)
ReadOnlyRietveld._local_changes.setdefault(issue, {})['closed'] = True
def get_issue_properties(self, issue, messages):
data = self._rietveld.get_issue_properties(issue, messages)
data.update(self._get_local_changes(issue))
return data
def get_patchset_properties(self, issue, patchset):
return self._rietveld.get_patchset_properties(issue, patchset)
def get_depends_on_patchset(self, issue, patchset):
return self._rietveld.get_depends_on_patchset(issue, patchset)
def get_patch(self, issue, patchset):
return self._rietveld.get_patch(issue, patchset)
def update_description(self, issue, description): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: new description for issue %d: %s' %
(issue, description))
def add_comment(self, # pylint:disable=R0201
issue,
message,
add_as_reviewer=False):
logging.info('ReadOnlyRietveld: posting comment "%s" to issue %d' %
(message, issue))
def set_flag(self, issue, patchset, flag, value): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: setting flag "%s" to "%s" for issue %d' %
(flag, value, issue))
ReadOnlyRietveld._local_changes.setdefault(issue, {})[flag] = value
def trigger_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(builders_and_tests, issue))
def trigger_distributed_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(masters, issue))
| bsd-3-clause | 759,426,349,750,002,600 | 34.253045 | 80 | 0.620183 | false |
jezdez/django-hosts | tests/test_defaults.py | 3 | 3037 | from django.core.exceptions import ImproperlyConfigured
from django_hosts.defaults import host, patterns
from django_hosts.resolvers import get_host_patterns
from .base import HostsTestCase
class PatternsTests(HostsTestCase):
def test_pattern(self):
host_patterns = patterns('',
host(r'api', 'api.urls', name='api'),
)
self.assertEqual(len(host_patterns), 1)
self.assertTrue(isinstance(host_patterns[0], host))
self.assertEqual(repr(host_patterns[0]),
"<host api: regex='api' urlconf='api.urls' "
"scheme='//' port=''>")
def test_pattern_as_tuple(self):
host_patterns = patterns('',
(r'api', 'api.urls', 'api'),
)
self.assertEqual(len(host_patterns), 1)
self.assertTrue(isinstance(host_patterns[0], host))
def test_pattern_with_duplicate(self):
api_host = host(r'api', 'api.urls', name='api')
self.assertRaises(ImproperlyConfigured,
patterns, '', api_host, api_host)
def test_pattern_with_prefix(self):
host_patterns = patterns('mysite',
host(r'api', 'api.urls', name='api'),
)
self.assertEqual(len(host_patterns), 1)
self.assertTrue(isinstance(host_patterns[0], host))
self.assertEqual(host_patterns[0].urlconf, 'mysite.api.urls')
class HostTests(HostsTestCase):
def test_host(self):
api_host = host(r'api', 'api.urls', name='api')
self.assertTrue(isinstance(api_host, host))
def test_host_prefix(self):
api_host = host(r'api', 'api.urls', name='api', prefix='spam.eggs')
self.assertEqual(api_host.urlconf, 'spam.eggs.api.urls')
def test_host_string_callback(self):
api_host = host(r'api', 'api.urls', name='api',
callback='django_hosts.resolvers.get_host_patterns')
self.assertEqual(api_host.callback, get_host_patterns)
def test_host_callable_callback(self):
api_host = host(r'api', 'api.urls', name='api',
callback=get_host_patterns)
self.assertEqual(api_host.callback, get_host_patterns)
def test_host_nonexistent_callback(self):
api_host = host(r'api', 'api.urls', name='api',
callback='whatever.non_existent')
self.assertRaisesMessage(ImproperlyConfigured,
"Could not import 'whatever'. Error was: No module named",
lambda: api_host.callback)
api_host = host(r'api', 'api.urls', name='api',
callback='django_hosts.non_existent')
self.assertRaisesMessage(ImproperlyConfigured,
"Could not import 'django_hosts.non_existent'. "
"Callable does not exist in module",
lambda: api_host.callback)
api_host = host(r'api', 'api.urls', name='api',
callback='tests.broken_module.yeah_yeah')
self.assertRaises(ImproperlyConfigured, lambda: api_host.callback)
| bsd-3-clause | 7,119,394,531,383,091,000 | 38.441558 | 76 | 0.603556 | false |
gonicus/gosa | backend/src/gosa/backend/plugins/samba/logonhours.py | 1 | 2755 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import time
from gosa.backend.objects.types import AttributeType
class SambaLogonHoursAttribute(AttributeType):
"""
This is a special object-attribute-type for sambaLogonHours.
    This class can convert sambaLogonHours to a UnicodeString and vice versa.
It is used in the samba-object definition file.
"""
__alias__ = "SambaLogonHours"
def values_match(self, value1, value2):
return str(value1) == str(value2)
def is_valid_value(self, value):
if len(value):
try:
# Check if each week day contains 24 values.
if type(value[0]) is not str or len(value[0]) != 168 or len(set(value[0]) - set('01')):
return False
return True
except:
return False
def _convert_to_unicodestring(self, value):
"""
        This method is a converter used when values get read from or written to the backend.
Converts the 'SambaLogonHours' object-type into a 'UnicodeString'-object.
"""
if len(value):
# Combine the binary strings
lstr = value[0]
# New reverse every 8 bit part, and toggle high- and low-tuple (4Bits)
new = ""
for i in range(0, 21):
n = lstr[i * 8:((i + 1) * 8)]
n = n[0:4] + n[4:]
n = n[::-1]
n = str(hex(int(n, 2)))[2::].rjust(2, '0')
new += n
value = [new.upper()]
return value
def _convert_from_string(self, value):
return self._convert_from_unicodestring(value)
def _convert_from_unicodestring(self, value):
"""
        This method is a converter used when values get read from or written to the backend.
Converts a 'UnicodeString' attribute into the 'SambaLogonHours' object-type.
"""
if len(value):
# Convert each hex-pair into binary values.
# Then reverse the binary result and switch high and low pairs.
value = value[0]
lstr = ""
for i in range(0, 42, 2):
n = (bin(int(value[i:i + 2], 16))[2::]).rjust(8, '0')
n = n[::-1]
lstr += n[0:4] + n[4:]
# Shift lster by timezone offset
shift_by = int((168 + (time.timezone/3600)) % 168)
lstr = lstr[shift_by:] + lstr[:shift_by]
# Parse result into more readable value
value = [lstr]
return value
| lgpl-2.1 | 5,620,265,410,477,604,000 | 29.611111 | 103 | 0.549546 | false |
mice-software/maus | tests/integration/test_simulation/test_beam_maker/binomial_beam_config.py | 1 | 4151 | # This file is part of MAUS: http://micewww.pp.rl.ac.uk:8080/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration to generate a beam distribution with binomial distribution in
the spill and various distributions for different particle types
"""
#pylint: disable = C0103, R0801
import os
mrd = os.environ["MAUS_ROOT_DIR"]
simulation_geometry_filename = os.path.join(
mrd, "tests", "integration", "test_simulation", "test_beam_maker",
"BeamTest.dat"
)
output_root_file_name = os.path.join(mrd, "tmp", "test_beammaker_output.root")
input_root_file_name = output_root_file_name # for conversion
spill_generator_number_of_spills = 1000
verbose_level = 1
beam = {
"particle_generator":"binomial", # routine for generating empty primaries
"binomial_n":20, # number of coin tosses
"binomial_p":0.1, # probability of making a particle on each toss
"random_seed":5, # random seed for beam generation; controls also how the MC
# seeds are generated
"definitions":[
##### MUONS #######
{
"reference":{
"position":{"x":0.0, "y":0.0, "z":3.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":-13,
"energy":226.0,
"time":2.e6,
"random_seed":0
}, # reference particle
"random_seed_algorithm":"incrementing_random", # algorithm for seeding MC
"weight":90., # probability of generating a particle
"transverse":{
"transverse_mode":"penn",
"emittance_4d":6.,
"beta_4d":333.,
"alpha_4d":1.,
"normalised_angular_momentum":2.,
"bz":4.e-3
},
"longitudinal":{
"longitudinal_mode":"sawtooth_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-1.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
},
##### PIONS #####
{ # as above...
"reference":{
"position":{"x":0.0, "y":-0.0, "z":0.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":211, "energy":285.0, "time":0.0, "random_seed":10
},
"random_seed_algorithm":"incrementing_random",
"weight":2.,
"transverse":{"transverse_mode":"constant_solenoid", "emittance_4d":6.,
"normalised_angular_momentum":0.1, "bz":4.e-3},
"longitudinal":{"longitudinal_mode":"uniform_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-1.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
},
##### ELECTRONS #####
{ # as above...
"reference":{
"position":{"x":0.0, "y":-0.0, "z":0.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":-11, "energy":200.0, "time":0.0, "random_seed":10
},
"random_seed_algorithm":"incrementing_random",
"weight":8.,
"transverse":{"transverse_mode":"constant_solenoid", "emittance_4d":6.,
"normalised_angular_momentum":0.1, "bz":4.e-3},
"longitudinal":{"longitudinal_mode":"uniform_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-2.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
}]
}
| gpl-3.0 | 8,497,676,633,273,780,000 | 37.082569 | 80 | 0.544688 | false |
berkmancenter/mediacloud | apps/common/tests/python/mediawords/util/test_extract_article_html_from_page_html.py | 1 | 3359 | import multiprocessing
from typing import Union
from unittest import TestCase
from mediawords.test.hash_server import HashServer
from mediawords.util.config.common import CommonConfig
from mediawords.util.extract_article_from_page import extract_article_html_from_page_html
from mediawords.util.network import random_unused_port
from mediawords.util.parse_json import encode_json
def test_extract_article_html_from_page_html():
"""Basic test."""
content = """
<html>
<head>
<title>I'm a test</title>
</head>
<body>
<p>Hi test, I'm dad!</p>
</body>
</html>
"""
response = extract_article_html_from_page_html(content=content)
assert response
assert 'extracted_html' in response
assert 'extractor_version' in response
assert "I'm a test" in response['extracted_html']
assert "Hi test, I'm dad!" in response['extracted_html']
assert 'readabilityBody' in response['extracted_html'] # <body id="readabilityBody">
assert "readability-lxml" in response['extractor_version']
class TestExtractConnectionErrors(TestCase):
"""Extract the page but fail the first response."""
__slots__ = [
'is_first_response',
]
expected_extracted_text = "Extraction worked the second time!"
def __extract_but_initially_fail(self, _: HashServer.Request) -> Union[str, bytes]:
"""Page callback that fails initially but then changes its mind."""
with self.is_first_response.get_lock():
if self.is_first_response.value == 1:
self.is_first_response.value = 0
# Closest to a connection error that we can get
raise Exception("Whoops!")
else:
response = ""
response += "HTTP/1.0 200 OK\r\n"
response += "Content-Type: application/json; charset=UTF-8\r\n"
response += "\r\n"
response += encode_json({
'extracted_html': self.expected_extracted_text,
'extractor_version': 'readability-lxml',
})
return response
def test_extract_article_html_from_page_html_connection_errors(self):
"""Try extracting with connection errors."""
# Use multiprocessing.Value() because request might be handled in a fork
self.is_first_response = multiprocessing.Value('i', 1)
pages = {
'/extract': {
'callback': self.__extract_but_initially_fail,
}
}
port = random_unused_port()
hs = HashServer(port=port, pages=pages)
hs.start()
class MockExtractorCommonConfig(CommonConfig):
"""Mock configuration which points to our unstable extractor."""
def extractor_api_url(self) -> str:
return f'http://localhost:{port}/extract'
extractor_response = extract_article_html_from_page_html(content='whatever', config=MockExtractorCommonConfig())
hs.stop()
assert extractor_response
assert 'extracted_html' in extractor_response
assert 'extractor_version' in extractor_response
assert extractor_response['extracted_html'] == self.expected_extracted_text
assert not self.is_first_response.value, "Make sure the initial extractor call failed."
| agpl-3.0 | -8,327,418,223,361,509,000 | 32.257426 | 120 | 0.627865 | false |
amagnus/pulsegig | app/models.py | 1 | 1894 | from django.db import models
from django.contrib.auth.models import User
class Guy(models.Model):
user = models.OneToOneField(User, primary_key=True)
cell = models.CharField(max_length=15)
metroarea_name = models.CharField(max_length=30, default=None, null=True)
metroareaID = models.IntegerField(default=None, null=True)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
        return unicode(self.user)
class Band(models.Model):
name = models.CharField(max_length=100)
genre = models.CharField(max_length=100)
skID = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.name
class SimilarBand(models.Model):
band_input = models.ForeignKey(Band, related_name='band_input')
band_suggest = models.ForeignKey(Band, related_name='band_suggest')
disabled = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.band_input.name
class Alert(models.Model):
user = models.ForeignKey(User)
band = models.ForeignKey(Band)
disabled = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.band.name
class AlertLog(models.Model):
user = models.ForeignKey(User)
band = models.ForeignKey(Band)
eventskID = models.IntegerField(default=None)
showDate = models.DateField()
showURL = models.CharField(max_length=255)
is_similar = models.BooleanField(default=False)
send_on = models.DateTimeField()
has_sent = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.band.name
| mit | -6,112,383,756,835,243,000 | 30.566667 | 77 | 0.705913 | false |
karstenw/FMPLayoutExporter | fmpa10.py | 1 | 5014 | version = 1.1
path = u'/Applications/FileMaker/FileMaker Pro 11 Advanced/FileMaker Pro Advanced.app'
classes = \
[('application', 'capp'),
('window', 'cwin'),
('document', 'docu'),
('database', 'cDB '),
('table', 'cTBL'),
('FileMaker_script', 'cSCP'),
('layout', 'ctbl'),
('field', 'ccol'),
('record', 'crow'),
('cell', 'ccel'),
('repetition', 'cREP'),
('request', 'cRQT'),
('menu_item', 'cmen'),
('menu', 'cmnu')]
enums = \
[('table', 'TABL'),
('view', 'VIEW'),
('read_only', 'nmod'),
('formulas_protected', 'fpro'),
('read_write', 'modf'),
('no_access', '\x00\x00\x00\x00'),
('read', '\x00\x00\x00\x01'),
('write', '\x00\x00\x00\x02'),
('update', '\x00\x00\x00\x04'),
('create', '\x00\x00\x00\x08'),
('delete', '\x00\x00\x00\x10'),
('read_write', '\x00\x00\x00\x03'),
('read_update', '\x00\x00\x00\x05'),
('read_create', '\x00\x00\x00\t'),
('read_delete', '\x00\x00\x00\x11'),
('write_update', '\x00\x00\x00\x06'),
('write_create', '\x00\x00\x00\n'),
('write_delete', '\x00\x00\x00\x12'),
('update_create', '\x00\x00\x00\x0c'),
('update_delete', '\x00\x00\x00\x14'),
('write_delete', '\x00\x00\x00\x18'),
('read_write_update', '\x00\x00\x00\x07'),
('read_write_create', '\x00\x00\x00\x0b'),
('read_write_delete', '\x00\x00\x00\x13'),
('write_update_create', '\x00\x00\x00\x0e'),
('write_update_delete', '\x00\x00\x00\x16'),
('update_create_delete', '\x00\x00\x00\x1c'),
('read_create_delete', '\x00\x00\x00\x19'),
('read_update_delete', '\x00\x00\x00\x15'),
('write_create_delete', '\x00\x00\x00\x1a'),
('read_update_create', '\x00\x00\x00\r'),
('no_delete', '\x00\x00\x00\x0f'),
('no_create', '\x00\x00\x00\x17'),
('no_update', '\x00\x00\x00\x1b'),
('no_read', '\x00\x00\x00\x1e'),
('no_write', '\x00\x00\x00\x1d'),
('full', '\x00\x00\x00\x1f'),
('ascending', '\x00\x00\x00\x00'),
('descending', '\x00\x00\x00\x01'),
('custom', '\x00\x00\x00\x04'),
('sum', 'TOTL'),
('count', 'CONT'),
('mean', 'MEAN'),
('standard_deviation', 'STDV'),
('average', 'AVRG'),
('minimum', 'MIN '),
('maximum', 'MAX '),
('unlocked', 'NOLK'),
('shared_lock', 'SHLK'),
('exclusive_lock', 'EXLK'),
('false', 'fals'),
('sharing_hidden', 'mltH'),
('true', 'true'),
('single', 'rSgl'),
('repeated', 'rFxd'),
('guest', 'pGST'),
('before_', 'befo'),
('after_', 'afte'),
('beginning_', 'bgng'),
('end_', 'end '),
('replace', 'rplc'),
('index', 'indx'),
('named', 'name'),
('ID_', 'ID ')]
properties = \
[('best_type', 'pbst'),
('class_', 'pcls'),
('default_type', 'deft'),
('frontmost', 'pisf'),
('name', 'pnam'),
('version', 'vers'),
('bounds', 'pbnd'),
('visible', 'pvis'),
('index', 'pidx'),
('floating', 'isfl'),
('zoomable', 'iszm'),
('zoomed', 'pzum'),
('modal', 'pmod'),
('resizable', 'prsz'),
('has_close_box', 'hclb'),
('has_title_bar', 'ptit'),
('current_layout', 'pCLY'),
('current_record', 'pCRW'),
('current_table', 'pCTB'),
('current_cell', 'pCCL'),
('modified', 'imod'),
('multiuser', 'pMUr'),
('lock', 'pLCK'),
('access', 'pACS'),
('ID_', 'ID '),
('protection', 'ppro'),
('kind', 'pKND'),
('choices', 'pCHS'),
('formula', 'pfor'),
('nulls_OK', 'pNLS'),
('repeats', 'pRPT'),
('repeat_size', 'pRPS'),
('unique_value', 'pUNQ'),
('globalValue', 'pGLL'),
('cellValue', 'vlue'),
('omitted', 'pOMT'),
('enabled', 'enbl'),
('item_number', 'itmn'),
('checked', 'pCHK')]
elements = \
[('applications', 'capp'),
('windows', 'cwin'),
('documents', 'docu'),
('databases', 'cDB '),
('tables', 'cTBL'),
('FileMaker_scripts', 'cSCP'),
('layouts', 'ctbl'),
('fields', 'ccol'),
('records', 'crow'),
('cells', 'ccel'),
('repetitions', 'cREP'),
('requests', 'cRQT'),
('menu_items', 'cmen'),
('menus', 'cmnu')]
commands = \
[('getURL', 'GURLGURL', [('for_accounts', 'pACT')]),
('exists', 'coredoex', []),
('show', 'miscmvis', []),
('close', 'coreclos', []),
('redo', 'miscredo', []),
('find', 'FMPRFIND', []),
('quit', 'aevtquit', []),
('cut', 'misccut ', []),
('get_remote_URL', 'FMPROPRM', []),
('open', 'aevtodoc', [('with_passwords', 'pPAS'), ('for_Accounts', 'pACT')]),
('create',
'corecrel',
[('new', 'kocl'),
('at', 'insh'),
('with_data', 'data'),
('with_properties', 'prdt')]),
('get_data', 'coregetd', [('as_', 'rtyp')]),
('event_info', 'coregtei', [('in_', 'wrcd')]),
('print_',
'aevtpdoc',
[('with_password', 'pPAS'),
('for_Accounts', 'pACT'),
('from_page', 'StPg'),
('to_page', 'NdPg'),
('with_copies', 'Cpis')]),
('duplicate', 'coreclon', [('to', 'insh')]),
('save', 'coresave', []),
('data_size', 'coredsiz', [('as_', 'rtyp')]),
('go_to', 'FMPRGOTO', []),
('sort', 'DATASORT', [('by', 'SRTE'), ('in_order', 'SRTT')]),
('undo', 'miscundo', []),
('set_data', 'coresetd', [('to', 'data')]),
('copy', 'misccopy', []),
('paste', 'miscpast', []),
('count', 'corecnte', [('class_', 'kocl')]),
('do_script', 'miscdosc', []),
('class_info', 'coreqobj', [('in_', 'wrcd')]),
('do_menu', 'miscmenu', [('menu_named', 'menn')]),
('delete', 'coredelo', [])]
| bsd-2-clause | 5,253,162,485,293,056,000 | 26.549451 | 86 | 0.52513 | false |
PyBossa/pybossa | pybossa/auth/token.py | 1 | 1271 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
class TokenAuth(object):
_specific_actions = []
@property
def specific_actions(self):
return self._specific_actions
def can(self, user, action, _, token=None):
action = ''.join(['_', action])
return getattr(self, action)(user, token)
def _create(self, user, token=None):
return False
def _read(self, user, token=None):
return not user.is_anonymous()
def _update(self, user, token):
return False
def _delete(self, user, token):
return False
| agpl-3.0 | 7,030,215,194,736,983,000 | 30 | 77 | 0.683714 | false |
ekumenlabs/terminus | terminus/generators/rndf_id_mapper.py | 1 | 2695 | """
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from city_visitor import CityVisitor
from models.polyline_geometry import PolylineGeometry
class RNDFIdMapper(CityVisitor):
"""Simple city visitor that generates the RNDF ids for segments,
lanes and waypoints. Ids and objects are stored in two dictionaries,
so we can later perform lookups in either way"""
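    # For example (hypothetical ids): after run(), id_for(some_lane) returns a
    # dotted RNDF id such as "1.2", and object_for("1.2") returns that lane back.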
# Note: For the time being we treat streets and trunks in the same way,
# hence generating a single lane for any of them. This will change in the
    # future, when we properly support multi-lane trunks.
def run(self):
self.segment_id = 0
self.waypoint_id = 0
self.lane_id = 0
self.object_to_id_level_1 = {}
self.object_to_id_level_2 = {}
self.id_to_object = {}
super(RNDFIdMapper, self).run()
def id_for(self, object):
try:
return self.object_to_id_level_1[id(object)]
except KeyError:
return self.object_to_id_level_2[object]
def object_for(self, id):
return self.id_to_object[id]
def map_road(self, road):
self.segment_id = self.segment_id + 1
self.lane_id = 0
self._register(str(self.segment_id), road)
def start_street(self, street):
self.map_road(street)
def start_trunk(self, trunk):
self.map_road(trunk)
def start_lane(self, lane):
self.lane_id = self.lane_id + 1
rndf_lane_id = str(self.segment_id) + '.' + str(self.lane_id)
self._register(rndf_lane_id, lane)
self.waypoint_id = 0
for waypoint in lane.waypoints_for(PolylineGeometry):
self.waypoint_id = self.waypoint_id + 1
rndf_waypoint_id = rndf_lane_id + '.' + str(self.waypoint_id)
self._register(rndf_waypoint_id, waypoint)
def _register(self, rndf_id, object):
"""We do some caching by id, to avoid computing hashes if they are
expensive, but keep the hash-based dict as a fallback"""
self.object_to_id_level_1[id(object)] = rndf_id
self.object_to_id_level_2[object] = rndf_id
self.id_to_object[rndf_id] = object
| apache-2.0 | -5,815,910,434,938,483,000 | 35.418919 | 77 | 0.661224 | false |
wkia/kodi-addon-repo | plugin.audio.openlast/default.py | 1 | 6672 | # -*- coding: utf-8 -*-
import os
import sys
import urllib
import urlparse
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
if sys.version_info < (2, 7):
import simplejson as json
else:
import json
from logging import log
from util import build_url
__addon__ = xbmcaddon.Addon()
#__addonid__ = __addon__.getAddonInfo('id')
#__settings__ = xbmcaddon.Addon(id='xbmc-vk.svoka.com')
#__language__ = __settings__.getLocalizedString
#LANGUAGE = __addon__.getLocalizedString
ADDONVERSION = __addon__.getAddonInfo('version')
CWD = __addon__.getAddonInfo('path').decode("utf-8")
log('start -----------------------------------------------------')
log('script version %s started' % ADDONVERSION)
#xbmc.log(str(sys.argv))
addonUrl = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
#my_addon = xbmcaddon.Addon()
# lastfmUser = my_addon.getSetting('lastfm_username')
xbmcplugin.setContent(addon_handle, 'audio')
lastfmApi = 'http://ws.audioscrobbler.com/2.0/'
lastfmApiKey = '47608ece2138b2edae9538f83f703457' # TODO use Openlast key
lastfmAddon = None
lastfmUser = ''
try:
lastfmAddon = xbmcaddon.Addon('service.scrobbler.lastfm')
lastfmUser = lastfmAddon.getSetting('lastfmuser')
except RuntimeError:
pass
#xbmc.log(str(args))
action = args.get('action', None)
folder = args.get('folder', None)
#xbmc.log('openlast: folder=' + str(folder)) #, xbmc.LOGDEBUG)
#xbmc.log('openlast: action=' + str(action)) #, xbmc.LOGDEBUG)
if folder is None:
url = build_url(addonUrl, {'folder': 'similarArtist'})
li = xbmcgui.ListItem('Similar artist radio', iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
if '' != lastfmUser:
url = build_url(addonUrl, {'folder': 'lastfm', 'username': lastfmUser})
# xbmc.log(url)
li = xbmcgui.ListItem('Personal radio for Last.fm user: ' + lastfmUser, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
url = build_url(addonUrl, {'folder': 'lastfm'})
li = xbmcgui.ListItem('Personal radio for Last.fm user...', iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif folder[0] == 'lastfm':
username = ''
if None != args.get('username'):
username = args.get('username')[0]
playcount = 0
if None != args.get('playcount'):
playcount = int(args.get('playcount')[0])
if username == '':
user_keyboard = xbmc.Keyboard()
user_keyboard.setHeading('Last.FM user name') # __language__(30001))
user_keyboard.setHiddenInput(False)
user_keyboard.setDefault(lastfmUser)
user_keyboard.doModal()
if user_keyboard.isConfirmed():
username = user_keyboard.getText()
else:
raise Exception("Login input was cancelled.")
if action is None:
url = build_url(lastfmApi, {'method': 'user.getInfo', 'user': username,
'format': 'json', 'api_key': lastfmApiKey})
reply = urllib.urlopen(url)
resp = json.load(reply)
if "error" in resp:
raise Exception("Error! DATA: " + str(resp))
else:
# xbmc.log(str(resp))
pass
playcount = int(resp['user']['playcount'])
img = resp['user']['image'][2]['#text']
if '' == img:
img = 'DefaultAudio.png'
url = build_url(addonUrl, {'folder': folder[0], 'action': 'lovedTracks', 'username': username})
li = xbmcgui.ListItem('Listen to loved tracks', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'topTracks', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('Listen to track library', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'topArtists', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('Listen to artist library', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'syncLibrary', 'username': username, 'playcount': playcount})
        li = xbmcgui.ListItem('[EXPERIMENTAL] Synchronize library to folder', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(addon_handle)
elif action[0] == 'lovedTracks':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s)' % (script, action[0], username))
elif action[0] == 'topTracks':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s, %s)' % (script, action[0], username, playcount))
elif action[0] == 'topArtists':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s, %s)' % (script, action[0], username, playcount))
elif action[0] == 'syncLibrary':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s)' % (script, action[0], username))
elif folder[0] == 'similarArtist':
if action is None:
url = build_url(lastfmApi, {'method': 'chart.getTopArtists',
'format': 'json', 'api_key': lastfmApiKey})
reply = urllib.urlopen(url)
resp = json.load(reply)
if "error" in resp:
raise Exception("Error! DATA: " + str(resp))
else:
#log(str(resp))
pass
for a in resp['artists']['artist']:
url = build_url(addonUrl, {'folder': folder[0], 'action': a['name'].encode('utf-8')})
li = xbmcgui.ListItem(a['name'])
li.setArt({'icon': a['image'][2]['#text'], 'thumb': a['image'][2]['#text'], 'fanart': a['image'][4]['#text']})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
pass
xbmcplugin.endOfDirectory(addon_handle)
log('end -----------------------------------------------------')
| gpl-2.0 | 4,180,069,975,239,429,000 | 37.566474 | 127 | 0.618855 | false |
mozman/ezdxf | tests/test_06_math/test_630b_bezier4p_functions.py | 1 | 4662 | # Copyright (c) 2010-2020 Manfred Moitzi
# License: MIT License
import pytest
import random
from ezdxf.math import (
cubic_bezier_interpolation, Vec3, Bezier3P, quadratic_to_cubic_bezier,
Bezier4P, have_bezier_curves_g1_continuity, bezier_to_bspline,
)
def test_vertex_interpolation():
points = [(0, 0), (3, 1), (5, 3), (0, 8)]
result = list(cubic_bezier_interpolation(points))
assert len(result) == 3
c1, c2, c3 = result
p = c1.control_points
assert p[0].isclose((0, 0))
assert p[1].isclose((0.9333333333333331, 0.3111111111111111))
assert p[2].isclose((1.8666666666666663, 0.6222222222222222))
assert p[3].isclose((3, 1))
p = c2.control_points
assert p[0].isclose((3, 1))
assert p[1].isclose((4.133333333333334, 1.3777777777777778))
assert p[2].isclose((5.466666666666667, 1.822222222222222))
assert p[3].isclose((5, 3))
p = c3.control_points
assert p[0].isclose((5, 3))
assert p[1].isclose((4.533333333333333, 4.177777777777778))
assert p[2].isclose((2.2666666666666666, 6.088888888888889))
assert p[3].isclose((0, 8))
def test_quadratic_to_cubic_bezier():
r = random.Random(0)
def random_vec() -> Vec3:
return Vec3(r.uniform(-10, 10), r.uniform(-10, 10), r.uniform(-10, 10))
for i in range(1000):
quadratic = Bezier3P((random_vec(), random_vec(), random_vec()))
quadratic_approx = list(quadratic.approximate(10))
cubic = quadratic_to_cubic_bezier(quadratic)
cubic_approx = list(cubic.approximate(10))
assert len(quadratic_approx) == len(cubic_approx)
for p1, p2 in zip(quadratic_approx, cubic_approx):
assert p1.isclose(p2)
# G1 continuity: normalized end-tangent == normalized start-tangent of next curve
B1 = Bezier4P([(0, 0), (1, 1), (2, 1), (3, 0)])
# B1/B2 has G1 continuity:
B2 = Bezier4P([(3, 0), (4, -1), (5, -1), (6, 0)])
# B1/B3 has no G1 continuity:
B3 = Bezier4P([(3, 0), (4, 1), (5, 1), (6, 0)])
# B1/B4 G1 continuity off tolerance:
B4 = Bezier4P([(3, 0), (4, -1.03), (5, -1.0), (6, 0)])
# B1/B5 has a gap between B1 end and B5 start:
B5 = Bezier4P([(4, 0), (5, -1), (6, -1), (7, 0)])
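# Editor's sketch (not part of the original test suite): G1 continuity boils
# down to comparing the normalized end tangent of one curve with the
# normalized start tangent of the next (the real helper additionally requires
# the end point of the first curve to match the start point of the second).
# Assumes Vec3.normalize() and Vec3.isclose() as provided by ezdxf.math:
def _manual_g1_check(c1, c2):
    p = c1.control_points
    q = c2.control_points
    end_tangent = (p[3] - p[2]).normalize()
    start_tangent = (q[1] - q[0]).normalize()
    return end_tangent.isclose(start_tangent)
# _manual_g1_check(B1, B2) -> True, _manual_g1_check(B1, B3) -> False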
def test_g1_continuity_for_bezier_curves():
assert have_bezier_curves_g1_continuity(B1, B2) is True
assert have_bezier_curves_g1_continuity(B1, B3) is False
assert have_bezier_curves_g1_continuity(B1, B4, g1_tol=1e-4) is False, \
"should be outside of tolerance "
assert have_bezier_curves_g1_continuity(B1, B5) is False, \
"end- and start point should match"
D1 = Bezier4P([(0, 0), (1, 1), (3, 0), (3, 0)])
D2 = Bezier4P([(3, 0), (3, 0), (5, -1), (6, 0)])
def test_g1_continuity_for_degenerated_bezier_curves():
assert have_bezier_curves_g1_continuity(D1, B2) is False
assert have_bezier_curves_g1_continuity(B1, D2) is False
assert have_bezier_curves_g1_continuity(D1, D2) is False
@pytest.mark.parametrize('curve', [D1, D2])
def test_flatten_degenerated_bezier_curves(curve):
# Degenerated Bezier curves behave like regular curves!
assert len(list(curve.flattening(0.1))) > 4
@pytest.mark.parametrize("b1,b2", [
(B1, B2), # G1 continuity, the common case
(B1, B3), # without G1 continuity is also a regular B-spline
(B1, B5), # regular B-spline, but first control point of B5 is lost
], ids=["G1", "without G1", "gap"])
def test_bezier_curves_to_bspline(b1, b2):
bspline = bezier_to_bspline([b1, b2])
# Remove duplicate control point between two adjacent curves:
expected = list(b1.control_points) + list(b2.control_points)[1:]
assert bspline.degree == 3, "should be a cubic B-spline"
assert bspline.control_points == tuple(expected)
def test_quality_of_bezier_to_bspline_conversion_1():
# This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
points0 = B1.approximate(10)
points1 = bezier_to_bspline([B1]).approximate(10)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_quality_of_bezier_to_bspline_conversion_2():
# This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
# Remove duplicate point between the two curves:
points0 = list(B1.approximate(10)) + list(B2.approximate(10))[1:]
points1 = bezier_to_bspline([B1, B2]).approximate(20)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_bezier_curves_to_bspline_error():
with pytest.raises(ValueError):
bezier_to_bspline([]) # one or more curves expected
| mit | 2,218,089,063,526,213,400 | 35.40625 | 81 | 0.65794 | false |
fdouetteau/PyBabe | pybabe/format_csv.py | 1 | 3107 |
from base import BabeBase, StreamHeader, StreamFooter
import csv
from charset import UTF8Recoder, UTF8RecoderWithCleanup, PrefixReader, UnicodeCSVWriter
import codecs
import logging
log = logging.getLogger("csv")
def linepull(stream, dialect, kwargs):
it = iter(stream)
fields = kwargs.get('fields', None)
if not fields:
fields = [it.next().rstrip('\r\n')]
metainfo = StreamHeader(**dict(kwargs, fields=fields))
yield metainfo
for row in it:
yield metainfo.t._make([row.rstrip('\r\n')])
yield StreamFooter()
def build_value(x, null_value):
if x == null_value:
return None
else:
return unicode(x, "utf-8")
def csvpull(stream, dialect, kwargs):
reader = csv.reader(stream, dialect)
fields = kwargs.get('fields', None)
null_value = kwargs.get('null_value', "")
ignore_malformed = kwargs.get('ignore_bad_lines', False)
if not fields:
fields = reader.next()
metainfo = StreamHeader(**dict(kwargs, fields=fields))
yield metainfo
for row in reader:
try:
yield metainfo.t._make([build_value(x, null_value) for x in row])
except Exception, e:
if ignore_malformed:
log.warn("Malformed line: %s, %s" % (row, e))
else:
raise e
yield StreamFooter()
def pull(format, stream, kwargs):
if kwargs.get('utf8_cleanup', False):
stream = UTF8RecoderWithCleanup(stream, kwargs.get('encoding', 'utf-8'))
elif codecs.getreader(kwargs.get('encoding', 'utf-8')) != codecs.getreader('utf-8'):
stream = UTF8Recoder(stream, kwargs.get('encoding', None))
else:
pass
delimiter = kwargs.get('delimiter', None)
sniff_read = stream.next()
stream = PrefixReader(sniff_read, stream, linefilter=kwargs.get("linefilter", None))
dialect = csv.Sniffer().sniff(sniff_read)
if sniff_read.endswith('\r\n'):
dialect.lineterminator = '\r\n'
else:
dialect.lineterminator = '\n'
if dialect.delimiter.isalpha() and not delimiter:
# http://bugs.python.org/issue2078
for row in linepull(stream, dialect, kwargs):
yield row
return
if delimiter:
dialect.delimiter = delimiter
for row in csvpull(stream, dialect, kwargs):
yield row
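# Editor's note (illustrative sketch using standard-library behaviour only):
# pull() lets csv.Sniffer guess the dialect from the first line of the stream,
# then patches the line terminator and, optionally, the delimiter.
def _sniff_example():
    sample = 'a;b;c\r\n1;2;3\r\n'
    dialect = csv.Sniffer().sniff(sample)
    # the sniffer should report ';' for this sample; pull() would then force
    # lineterminator to '\r\n' because the sniffed line ends with it
    return dialect.delimiter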
class default_dialect(csv.Dialect):
lineterminator = '\n'
delimiter = ','
doublequote = False
escapechar = '\\'
quoting = csv.QUOTE_MINIMAL
quotechar = '"'
def push(format, metainfo, instream, outfile, encoding, delimiter=None, **kwargs):
if not encoding:
encoding = "utf8"
dialect = kwargs.get('dialect', default_dialect)
if delimiter:
dialect.delimiter = delimiter
writer = UnicodeCSVWriter(outfile, dialect=dialect, encoding=encoding)
writer.writerow(metainfo.fields)
for k in instream:
if isinstance(k, StreamFooter):
break
else:
writer.writerow(k)
BabeBase.addPullPlugin('csv', ['csv', 'tsv', 'txt'], pull)
BabeBase.addPushPlugin('csv', ['csv', 'tsv', 'txt'], push)
| bsd-3-clause | -8,952,105,549,496,500,000 | 30.07 | 88 | 0.631799 | false |
filippog/pysnmp | examples/hlapi/asyncore/sync/agent/ntforg/v3-trap.py | 1 | 1601 | """
SNMPv3 TRAP: auth SHA, privacy: AES128
++++++++++++++++++++++++++++++++++++++
Send SNMP notification using the following options:
* SNMPv3
* with authoritative snmpEngineId = 0x8000000001020304
(USM must be configured at the Receiver accordingly)
* with user 'usr-sha-aes128', auth: SHA, priv: AES128
* over IPv4/UDP
* send TRAP notification
* with TRAP ID 'authenticationFailure' specified as a MIB symbol
* do not include any additional managed object information
SNMPv3 TRAPs require pre-sharing the Notification Originator's
value of SnmpEngineId with the Notification Receiver. To facilitate that
we will use a static (i.e. not autogenerated) version of snmpEngineId.
Functionally similar to:
| $ snmptrap -v3 -e 8000000001020304 -l authPriv -u usr-sha-aes -A authkey1 -X privkey1 -a SHA -x AES demo.snmplabs.com 12345 1.3.6.1.4.1.20408.4.1.1.2 1.3.6.1.2.1.1.1.0 s "my system"
"""#
from pysnmp.hlapi import *
errorIndication, errorStatus, errorIndex, varBinds = next(
sendNotification(SnmpEngine(OctetString(hexValue='8000000001020304')),
UsmUserData('usr-sha-aes128', 'authkey1', 'privkey1',
authProtocol=usmHMACSHAAuthProtocol,
privProtocol=usmAesCfb128Protocol),
UdpTransportTarget(('demo.snmplabs.com', 162)),
ContextData(),
'trap',
NotificationType(
ObjectIdentity('SNMPv2-MIB', 'authenticationFailure')
)
)
)
if errorIndication:
print(errorIndication)
| bsd-3-clause | 7,296,825,608,207,418,000 | 38.04878 | 183 | 0.647096 | false |
sander76/home-assistant | homeassistant/components/trace/__init__.py | 1 | 3528 | """Support for script and automation tracing and debugging."""
from __future__ import annotations
import datetime as dt
from itertools import count
from typing import Any, Deque
from homeassistant.core import Context
from homeassistant.helpers.trace import (
TraceElement,
trace_id_get,
trace_id_set,
trace_set_child_id,
)
import homeassistant.util.dt as dt_util
from . import websocket_api
from .const import DATA_TRACE, STORED_TRACES
from .utils import LimitedSizeDict
DOMAIN = "trace"
async def async_setup(hass, config):
"""Initialize the trace integration."""
hass.data[DATA_TRACE] = {}
websocket_api.async_setup(hass)
return True
def async_store_trace(hass, trace):
"""Store a trace if its item_id is valid."""
key = trace.key
if key[1]:
traces = hass.data[DATA_TRACE]
if key not in traces:
traces[key] = LimitedSizeDict(size_limit=STORED_TRACES)
traces[key][trace.run_id] = trace
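# Editor's sketch (assumption): LimitedSizeDict is imported from .utils and its
# implementation is not shown here. A minimal size-capped mapping that evicts
# the oldest run when more than STORED_TRACES runs are kept could look like
# this; the real helper may differ.
class _SketchLimitedSizeDict(dict):
    """Illustrative only - not used by this integration."""

    def __init__(self, size_limit=None):
        super().__init__()
        self.size_limit = size_limit

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        if self.size_limit is not None:
            while len(self) > self.size_limit:
                # dicts preserve insertion order, so this drops the oldest run
                del self[next(iter(self))]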
class ActionTrace:
"""Base container for an script or automation trace."""
_run_ids = count(0)
def __init__(
self,
key: tuple[str, str],
config: dict[str, Any],
context: Context,
):
"""Container for script trace."""
self._trace: dict[str, Deque[TraceElement]] | None = None
self._config: dict[str, Any] = config
self.context: Context = context
self._error: Exception | None = None
self._state: str = "running"
self.run_id: str = str(next(self._run_ids))
self._timestamp_finish: dt.datetime | None = None
self._timestamp_start: dt.datetime = dt_util.utcnow()
self.key: tuple[str, str] = key
if trace_id_get():
trace_set_child_id(self.key, self.run_id)
trace_id_set((key, self.run_id))
def set_trace(self, trace: dict[str, Deque[TraceElement]]) -> None:
"""Set trace."""
self._trace = trace
def set_error(self, ex: Exception) -> None:
"""Set error."""
self._error = ex
def finished(self) -> None:
"""Set finish time."""
self._timestamp_finish = dt_util.utcnow()
self._state = "stopped"
def as_dict(self) -> dict[str, Any]:
"""Return dictionary version of this ActionTrace."""
result = self.as_short_dict()
traces = {}
if self._trace:
for key, trace_list in self._trace.items():
traces[key] = [item.as_dict() for item in trace_list]
result.update(
{
"trace": traces,
"config": self._config,
"context": self.context,
}
)
if self._error is not None:
result["error"] = str(self._error)
return result
def as_short_dict(self) -> dict[str, Any]:
"""Return a brief dictionary version of this ActionTrace."""
last_step = None
if self._trace:
last_step = list(self._trace)[-1]
result = {
"last_step": last_step,
"run_id": self.run_id,
"state": self._state,
"timestamp": {
"start": self._timestamp_start,
"finish": self._timestamp_finish,
},
"domain": self.key[0],
"item_id": self.key[1],
}
if self._error is not None:
result["error"] = str(self._error)
if last_step is not None:
result["last_step"] = last_step
return result
| apache-2.0 | -2,714,262,162,881,511,000 | 27.451613 | 71 | 0.562642 | false |
elzaggo/pydoop | test/avro/test_io.py | 1 | 4807 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import os
import unittest
import itertools as it
import avro.datafile as avdf
from avro.io import DatumReader, DatumWriter
from pydoop.mapreduce.pipes import InputSplit
from pydoop.avrolib import (
SeekableDataFileReader, AvroReader, AvroWriter, parse
)
from pydoop.test_utils import WDTestCase
from pydoop.utils.py3compat import czip, cmap
import pydoop.hdfs as hdfs
from common import avro_user_record
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestAvroIO(WDTestCase):
def setUp(self):
super(TestAvroIO, self).setUp()
with open(os.path.join(THIS_DIR, "user.avsc")) as f:
self.schema = parse(f.read())
def write_avro_file(self, rec_creator, n_samples, sync_interval):
avdf.SYNC_INTERVAL = sync_interval
self.assertEqual(avdf.SYNC_INTERVAL, sync_interval)
fo = self._mkf('data.avro', mode='wb')
with avdf.DataFileWriter(fo, DatumWriter(), self.schema) as writer:
for i in range(n_samples):
writer.append(rec_creator(i))
return fo.name
def test_seekable(self):
fn = self.write_avro_file(avro_user_record, 500, 1024)
with open(fn, 'rb') as f:
sreader = SeekableDataFileReader(f, DatumReader())
res = [t for t in czip(cmap(
lambda _: f.tell(), it.repeat(1)
), sreader)]
sreader.align_after(res[-1][0])
with self.assertRaises(StopIteration):
r = next(sreader)
sreader.align_after(0)
r = next(sreader)
self.assertEqual(r, res[0][1])
def offset_iterator():
s = -1
for o, r in res:
sreader.align_after(o)
t = f.tell()
if t == s:
continue
s = t
x = next(sreader)
yield (t, x)
i = 0
for xo, x in offset_iterator():
sreader.align_after(xo)
for o, r in res[i:]:
if o >= xo:
self.assertEqual(x, r)
break
i += 1
def test_avro_reader(self):
N = 500
fn = self.write_avro_file(avro_user_record, N, 1024)
url = hdfs.path.abspath(fn, local=True)
class FunkyCtx(object):
def __init__(self, isplit):
self.input_split = isplit
def get_areader(offset, length):
isplit = InputSplit(InputSplit.to_string(url, offset, length))
ctx = FunkyCtx(isplit)
return AvroReader(ctx)
areader = get_areader(0, 14)
file_length = areader.reader.file_length
with self.assertRaises(StopIteration):
next(areader)
areader = get_areader(0, file_length)
with SeekableDataFileReader(open(fn, 'rb'), DatumReader()) as sreader:
for (o, a), s in czip(areader, sreader):
self.assertEqual(a, s)
mid_len = int(file_length / 2)
lows = [x for x in get_areader(0, mid_len)]
highs = [x for x in get_areader(mid_len, file_length)]
self.assertEqual(N, len(lows) + len(highs))
def test_avro_writer(self):
class FunkyCtx(object):
def __init__(self_, job_conf):
self_.job_conf = job_conf
class AWriter(AvroWriter):
schema = self.schema
def emit(self_, key, value):
self_.writer.append(key)
ctx = FunkyCtx({
'mapreduce.task.partition': 1,
'mapreduce.task.output.dir': hdfs.path.abspath(self.wd, local=True)
})
awriter = AWriter(ctx)
N = 10
for i in range(N):
awriter.emit(avro_user_record(i), '')
awriter.close()
def suite():
suite_ = unittest.TestSuite()
suite_.addTest(TestAvroIO('test_seekable'))
suite_.addTest(TestAvroIO('test_avro_reader'))
suite_.addTest(TestAvroIO('test_avro_writer'))
return suite_
if __name__ == '__main__':
_RUNNER = unittest.TextTestRunner(verbosity=2)
_RUNNER.run((suite()))
| apache-2.0 | 8,125,486,267,982,131,000 | 30.418301 | 79 | 0.575619 | false |
SU-ECE-17-7/hotspotter | hsviz/draw_func2.py | 1 | 54605 | ''' Lots of functions for drawing and plotting visiony things '''
# TODO: New naming scheme
# viz_<func_name> will clear everything. The current axes and fig: clf, cla. # Will add annotations
# interact_<func_name> will clear everything and start user interactions.
# show_<func_name> will always clear the current axes, but not fig: cla # Might # add annotates?
# plot_<func_name> will not clear the axes or figure. More useful for graphs
# draw_<func_name> same as plot for now. More useful for images
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off, rrr, profile,
printDBG) = __common__.init(__name__, '[df2]', DEBUG=False, initmpl=True)
# Python
from itertools import izip
from os.path import splitext, split, join, normpath, exists
import colorsys
import itertools
import pylab
import sys
import textwrap
import time
import warnings
# Matplotlib / Qt
import matplotlib
import matplotlib as mpl # NOQA
from matplotlib.collections import PatchCollection, LineCollection
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Rectangle, Circle, FancyArrow
from matplotlib.transforms import Affine2D
from matplotlib.backends import backend_qt4
import matplotlib.pyplot as plt
# Qt
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
# Scientific
import numpy as np
import scipy.stats
import cv2
# HotSpotter
from hscom import helpers
from hscom import tools
from hscom.Printable import DynStruct
#================
# GLOBALS
#================
TMP_mevent = None
QT4_WINS = []
plotWidget = None
# GENERAL FONTS
SMALLER = 8
SMALL = 10
MED = 12
LARGE = 14
#fpargs = dict(family=None, style=None, variant=None, stretch=None, fname=None)
FONTS = DynStruct()
FONTS.small = FontProperties(weight='light', size=SMALL)
FONTS.smaller = FontProperties(weight='light', size=SMALLER)
FONTS.med = FontProperties(weight='light', size=MED)
FONTS.large = FontProperties(weight='light', size=LARGE)
FONTS.medbold = FontProperties(weight='bold', size=MED)
FONTS.largebold = FontProperties(weight='bold', size=LARGE)
# SPECIFIC FONTS
FONTS.legend = FONTS.small
FONTS.figtitle = FONTS.med
FONTS.axtitle = FONTS.med
FONTS.subtitle = FONTS.med
FONTS.xlabel = FONTS.smaller
FONTS.ylabel = FONTS.small
FONTS.relative = FONTS.smaller
# COLORS
ORANGE = np.array((255, 127, 0, 255)) / 255.0
RED = np.array((255, 0, 0, 255)) / 255.0
GREEN = np.array(( 0, 255, 0, 255)) / 255.0
BLUE = np.array(( 0, 0, 255, 255)) / 255.0
YELLOW = np.array((255, 255, 0, 255)) / 255.0
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
WHITE = np.array((255, 255, 255, 255)) / 255.0
GRAY = np.array((127, 127, 127, 255)) / 255.0
DEEP_PINK = np.array((255, 20, 147, 255)) / 255.0
PINK = np.array((255, 100, 100, 255)) / 255.0
FALSE_RED = np.array((255, 51, 0, 255)) / 255.0
TRUE_GREEN = np.array(( 0, 255, 0, 255)) / 255.0
DARK_ORANGE = np.array((127, 63, 0, 255)) / 255.0
DARK_YELLOW = np.array((127, 127, 0, 255)) / 255.0
PURPLE = np.array((102, 0, 153, 255)) / 255.0
UNKNOWN_PURP = PURPLE
# FIGURE GEOMETRY
DPI = 80
#DPI = 160
#FIGSIZE = (24) # default windows fullscreen
FIGSIZE_MED = (12, 6)
FIGSIZE_SQUARE = (12, 12)
FIGSIZE_BIGGER = (24, 12)
FIGSIZE_HUGE = (32, 16)
FIGSIZE = FIGSIZE_MED
# Quality drawings
#FIGSIZE = FIGSIZE_SQUARE
#DPI = 120
tile_within = (-1, 30, 969, 1041)
if helpers.get_computer_name() == 'Ooo':
TILE_WITHIN = (-1912, 30, -969, 1071)
# DEFAULTS. (TODO: Can these be cleaned up?)
DISTINCT_COLORS = True # and False
DARKEN = None
ELL_LINEWIDTH = 1.5
if DISTINCT_COLORS:
ELL_ALPHA = .6
LINE_ALPHA = .35
else:
ELL_ALPHA = .4
LINE_ALPHA = .4
LINE_ALPHA_OVERRIDE = helpers.get_arg('--line-alpha-override', type_=float, default=None)
ELL_ALPHA_OVERRIDE = helpers.get_arg('--ell-alpha-override', type_=float, default=None)
#LINE_ALPHA_OVERRIDE = None
#ELL_ALPHA_OVERRIDE = None
ELL_COLOR = BLUE
LINE_COLOR = RED
LINE_WIDTH = 1.4
SHOW_LINES = True # True
SHOW_ELLS = True
POINT_SIZE = 2
base_fnum = 9001
def next_fnum():
global base_fnum
base_fnum += 1
return base_fnum
def my_prefs():
global LINE_COLOR
global ELL_COLOR
global ELL_LINEWIDTH
global ELL_ALPHA
LINE_COLOR = (1, 0, 0)
ELL_COLOR = (0, 0, 1)
ELL_LINEWIDTH = 2
ELL_ALPHA = .5
def execstr_global():
execstr = ['global' + key for key in globals().keys()]
return execstr
def register_matplotlib_widget(plotWidget_):
'talks to PyQt4 guis'
global plotWidget
plotWidget = plotWidget_
#fig = plotWidget.figure
#axes_list = fig.get_axes()
#ax = axes_list[0]
#plt.sca(ax)
def unregister_qt4_win(win):
global QT4_WINS
if win == 'all':
QT4_WINS = []
def register_qt4_win(win):
global QT4_WINS
QT4_WINS.append(win)
def OooScreen2():
nRows = 1
nCols = 1
x_off = 30 * 4
y_off = 30 * 4
x_0 = -1920
y_0 = 30
w = (1912 - x_off) / nRows
h = (1080 - y_off) / nCols
return dict(num_rc=(1, 1), wh=(w, h), xy_off=(x_0, y_0), wh_off=(0, 10),
row_first=True, no_tile=False)
def deterministic_shuffle(list_):
randS = int(np.random.rand() * np.uint(0 - 2) / 2)
np.random.seed(len(list_))
np.random.shuffle(list_)
np.random.seed(randS)
def distinct_colors(N, brightness=.878):
# http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
sat = brightness
val = brightness
HSV_tuples = [(x * 1.0 / N, sat, val) for x in xrange(N)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
deterministic_shuffle(RGB_tuples)
return RGB_tuples
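# Editor's example (sketch): distinct_colors(N) returns N evenly spaced hues as
# (r, g, b) tuples in [0, 1]; the shuffle is deterministic because its seed is
# derived from N, so repeated calls yield the same palette.
def _distinct_colors_example():
    rgb_tuples = distinct_colors(4)
    return rgb_tuples  # e.g. one color per matching chip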
def add_alpha(colors):
return [list(color) + [1] for color in colors]
def _axis_xy_width_height(ax, xaug=0, yaug=0, waug=0, haug=0):
'gets geometry of a subplot'
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
def draw_border(ax, color=GREEN, lw=2, offset=None):
'draws rectangle border around a subplot'
xy, width, height = _axis_xy_width_height(ax, -.7, -.2, 1, .4)
if offset is not None:
xoff, yoff = offset
xy = [xoff, yoff]
height = - height - yoff
width = width - xoff
rect = matplotlib.patches.Rectangle(xy, width, height, lw=lw)
rect = ax.add_patch(rect)
rect.set_clip_on(False)
rect.set_fill(False)
rect.set_edgecolor(color)
def draw_roi(roi, label=None, bbox_color=(1, 0, 0),
lbl_bgcolor=(0, 0, 0), lbl_txtcolor=(1, 1, 1), theta=0, ax=None):
if ax is None:
ax = gca()
(rx, ry, rw, rh) = roi
#cos_ = np.cos(theta)
#sin_ = np.sin(theta)
#rot_t = Affine2D([( cos_, -sin_, 0),
#( sin_, cos_, 0),
#( 0, 0, 1)])
#scale_t = Affine2D([( rw, 0, 0),
#( 0, rh, 0),
#( 0, 0, 1)])
#trans_t = Affine2D([( 1, 0, rx + rw / 2),
#( 0, 1, ry + rh / 2),
#( 0, 0, 1)])
#t_end = scale_t + rot_t + trans_t + t_start
# Transformations are specified in backwards order.
trans_roi = Affine2D()
trans_roi.scale(rw, rh)
trans_roi.rotate(theta)
trans_roi.translate(rx + rw / 2, ry + rh / 2)
t_end = trans_roi + ax.transData
bbox = matplotlib.patches.Rectangle((-.5, -.5), 1, 1, lw=2, transform=t_end)
arw_x, arw_y, arw_dx, arw_dy = (-0.5, -0.5, 1.0, 0.0)
arrowargs = dict(head_width=.1, transform=t_end, length_includes_head=True)
arrow = FancyArrow(arw_x, arw_y, arw_dx, arw_dy, **arrowargs)
bbox.set_fill(False)
#bbox.set_transform(trans)
bbox.set_edgecolor(bbox_color)
arrow.set_edgecolor(bbox_color)
arrow.set_facecolor(bbox_color)
ax.add_patch(bbox)
ax.add_patch(arrow)
#ax.add_patch(arrow2)
if label is not None:
ax_absolute_text(rx, ry, label, ax=ax,
horizontalalignment='center',
verticalalignment='center',
color=lbl_txtcolor,
backgroundcolor=lbl_bgcolor)
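# Editor's example (sketch): draw a 40x20 ROI whose top-left corner is at
# (100, 50), rotated 30 degrees about its center, over a blank chip.
def _draw_roi_example():
    imshow(np.zeros((200, 300), np.uint8), fnum=next_fnum())
    draw_roi((100, 50, 40, 20), label='chip 1', theta=np.pi / 6)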
# ---- GENERAL FIGURE COMMANDS ----
def sanatize_img_fname(fname):
fname_clean = fname
search_replace_list = [(' ', '_'), ('\n', '--'), ('\\', ''), ('/', '')]
for old, new in search_replace_list:
fname_clean = fname_clean.replace(old, new)
fname_noext, ext = splitext(fname_clean)
fname_clean = fname_noext + ext.lower()
# Check for correct extensions
if not ext.lower() in helpers.IMG_EXTENSIONS:
fname_clean += '.png'
return fname_clean
def sanatize_img_fpath(fpath):
[dpath, fname] = split(fpath)
fname_clean = sanatize_img_fname(fname)
fpath_clean = join(dpath, fname_clean)
fpath_clean = normpath(fpath_clean)
return fpath_clean
def set_geometry(fnum, x, y, w, h):
fig = get_fig(fnum)
qtwin = fig.canvas.manager.window
qtwin.setGeometry(x, y, w, h)
def get_geometry(fnum):
fig = get_fig(fnum)
qtwin = fig.canvas.manager.window
(x1, y1, x2, y2) = qtwin.geometry().getCoords()
(x, y, w, h) = (x1, y1, x2 - x1, y2 - y1)
return (x, y, w, h)
def get_screen_info():
from PyQt4 import Qt, QtGui # NOQA
desktop = QtGui.QDesktopWidget()
mask = desktop.mask() # NOQA
layout_direction = desktop.layoutDirection() # NOQA
screen_number = desktop.screenNumber() # NOQA
normal_geometry = desktop.normalGeometry() # NOQA
num_screens = desktop.screenCount() # NOQA
avail_rect = desktop.availableGeometry() # NOQA
screen_rect = desktop.screenGeometry() # NOQA
QtGui.QDesktopWidget().availableGeometry().center() # NOQA
normal_geometry = desktop.normalGeometry() # NOQA
def get_all_figures():
all_figures_ = [manager.canvas.figure for manager in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
all_figures = []
# Make sure you dont show figures that this module closed
for fig in iter(all_figures_):
if not 'df2_closed' in fig.__dict__.keys() or not fig.df2_closed:
all_figures.append(fig)
# Return all the figures sorted by their number
all_figures = sorted(all_figures, key=lambda fig: fig.number)
return all_figures
def get_all_qt4_wins():
return QT4_WINS
def all_figures_show():
if plotWidget is not None:
plotWidget.figure.show()
plotWidget.figure.canvas.draw()
for fig in iter(get_all_figures()):
time.sleep(.1)
fig.show()
fig.canvas.draw()
def all_figures_tight_layout():
for fig in iter(get_all_figures()):
fig.tight_layout()
#adjust_subplots()
time.sleep(.1)
def get_monitor_geom(monitor_num=0):
from PyQt4 import QtGui # NOQA
desktop = QtGui.QDesktopWidget()
rect = desktop.availableGeometry()
geom = (rect.x(), rect.y(), rect.width(), rect.height())
return geom
def golden_wh(x):
'returns a width / height with a golden aspect ratio'
return map(int, map(round, (x * .618, x * .312)))
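# Editor's example (sketch): golden_wh(1000) -> [618, 312], i.e. round(x * .618)
# and round(x * .312) cast to int; this is the tile size all_figures_tile uses
# when wh is passed as a scalar.
def _golden_wh_example():
    w, h = golden_wh(1000)
    return w, h  # (618, 312)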
def all_figures_tile(num_rc=(3, 4), wh=1000, xy_off=(0, 0), wh_off=(0, 10),
row_first=True, no_tile=False, override1=False):
'Lays out all figures in a grid. if wh is a scalar, a golden ratio is used'
# RCOS TODO:
    # I want this function to lay out all the figures and qt windows within the
# bounds of a rectangle. (taken from the get_monitor_geom, or specified by
# the user i.e. left half of monitor 0). It should lay them out
# rectangularly and choose figure sizes such that all of them will fit.
if no_tile:
return
if not np.iterable(wh):
wh = golden_wh(wh)
all_figures = get_all_figures()
all_qt4wins = get_all_qt4_wins()
if override1:
if len(all_figures) == 1:
fig = all_figures[0]
win = fig.canvas.manager.window
win.setGeometry(0, 0, 900, 900)
update()
return
#nFigs = len(all_figures) + len(all_qt4_wins)
num_rows, num_cols = num_rc
w, h = wh
x_off, y_off = xy_off
w_off, h_off = wh_off
x_pad, y_pad = (0, 0)
printDBG('[df2] Tile all figures: ')
printDBG('[df2] wh = %r' % ((w, h),))
printDBG('[df2] xy_offsets = %r' % ((x_off, y_off),))
printDBG('[df2] wh_offsets = %r' % ((w_off, h_off),))
printDBG('[df2] xy_pads = %r' % ((x_pad, y_pad),))
if sys.platform == 'win32':
h_off += 0
w_off += 40
x_off += 40
y_off += 40
x_pad += 0
y_pad += 100
def position_window(i, win):
isqt4_mpl = isinstance(win, backend_qt4.MainWindow)
isqt4_back = isinstance(win, QtGui.QMainWindow)
if not isqt4_mpl and not isqt4_back:
raise NotImplementedError('%r-th Backend %r is not a Qt Window' % (i, win))
if row_first:
y = (i % num_rows) * (h + h_off) + 40
x = (int(i / num_rows)) * (w + w_off) + x_pad
else:
x = (i % num_cols) * (w + w_off) + 40
y = (int(i / num_cols)) * (h + h_off) + y_pad
x += x_off
y += y_off
win.setGeometry(x, y, w, h)
ioff = 0
for i, win in enumerate(all_qt4wins):
position_window(i, win)
ioff += 1
for i, fig in enumerate(all_figures):
win = fig.canvas.manager.window
position_window(i + ioff, win)
def all_figures_bring_to_front():
all_figures = get_all_figures()
for fig in iter(all_figures):
bring_to_front(fig)
def close_all_figures():
all_figures = get_all_figures()
for fig in iter(all_figures):
close_figure(fig)
def close_figure(fig):
fig.clf()
fig.df2_closed = True
qtwin = fig.canvas.manager.window
qtwin.close()
def bring_to_front(fig):
#what is difference between show and show normal?
qtwin = fig.canvas.manager.window
qtwin.raise_()
qtwin.activateWindow()
qtwin.setWindowFlags(Qt.WindowStaysOnTopHint)
qtwin.setWindowFlags(Qt.WindowFlags(0))
qtwin.show()
def show():
all_figures_show()
all_figures_bring_to_front()
plt.show()
def reset():
close_all_figures()
def draw():
all_figures_show()
def update():
draw()
all_figures_bring_to_front()
def present(*args, **kwargs):
'execing present should cause IPython magic'
print('[df2] Presenting figures...')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
all_figures_tile(*args, **kwargs)
all_figures_show()
all_figures_bring_to_front()
# Return an exec string
execstr = helpers.ipython_execstr()
execstr += textwrap.dedent('''
if not embedded:
print('[df2] Presenting in normal shell.')
print('[df2] ... plt.show()')
plt.show()
''')
return execstr
def save_figure(fnum=None, fpath=None, usetitle=False, overwrite=True):
#import warnings
#warnings.simplefilter("error")
# Find the figure
if fnum is None:
fig = gcf()
else:
fig = plt.figure(fnum, figsize=FIGSIZE, dpi=DPI)
# Enforce inches and DPI
fig.set_size_inches(FIGSIZE[0], FIGSIZE[1])
fnum = fig.number
if fpath is None:
# Find the title
fpath = sanatize_img_fname(fig.canvas.get_window_title())
if usetitle:
title = sanatize_img_fname(fig.canvas.get_window_title())
fpath = join(fpath, title)
# Add in DPI information
fpath_noext, ext = splitext(fpath)
size_suffix = '_DPI=%r_FIGSIZE=%d,%d' % (DPI, FIGSIZE[0], FIGSIZE[1])
fpath = fpath_noext + size_suffix + ext
# Sanatize the filename
fpath_clean = sanatize_img_fpath(fpath)
#fname_clean = split(fpath_clean)[1]
print('[df2] save_figure() %r' % (fpath_clean,))
#adjust_subplots()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if not exists(fpath_clean) or overwrite:
fig.savefig(fpath_clean, dpi=DPI)
def set_ticks(xticks, yticks):
ax = gca()
ax.set_xticks(xticks)
ax.set_yticks(yticks)
def set_xticks(tick_set):
ax = gca()
ax.set_xticks(tick_set)
def set_yticks(tick_set):
ax = gca()
ax.set_yticks(tick_set)
def set_xlabel(lbl, ax=None):
if ax is None:
ax = gca()
ax.set_xlabel(lbl, fontproperties=FONTS.xlabel)
def set_title(title, ax=None):
if ax is None:
ax = gca()
ax.set_title(title, fontproperties=FONTS.axtitle)
def set_ylabel(lbl):
ax = gca()
ax.set_ylabel(lbl, fontproperties=FONTS.xlabel)
def plot(*args, **kwargs):
return plt.plot(*args, **kwargs)
def plot2(x_data, y_data, marker='o', title_pref='', x_label='x', y_label='y', *args,
**kwargs):
do_plot = True
ax = gca()
if len(x_data) != len(y_data):
warnstr = '[df2] ! Warning: len(x_data) != len(y_data). Cannot plot2'
warnings.warn(warnstr)
draw_text(warnstr)
do_plot = False
if len(x_data) == 0:
warnstr = '[df2] ! Warning: len(x_data) == 0. Cannot plot2'
warnings.warn(warnstr)
draw_text(warnstr)
do_plot = False
if do_plot:
ax.plot(x_data, y_data, marker, *args, **kwargs)
min_ = min(x_data.min(), y_data.min())
max_ = max(x_data.max(), y_data.max())
# Equal aspect ratio
ax.set_xlim(min_, max_)
ax.set_ylim(min_, max_)
ax.set_aspect('equal')
ax.set_xlabel(x_label, fontproperties=FONTS.xlabel)
ax.set_ylabel(y_label, fontproperties=FONTS.xlabel)
ax.set_title(title_pref + ' ' + x_label + ' vs ' + y_label,
fontproperties=FONTS.axtitle)
def adjust_subplots_xlabels():
adjust_subplots(left=.03, right=.97, bottom=.2, top=.9, hspace=.15)
def adjust_subplots_xylabels():
adjust_subplots(left=.03, right=1, bottom=.1, top=.9, hspace=.15)
def adjust_subplots_safe(left=.1, right=.9, bottom=.1, top=.9, wspace=.3, hspace=.5):
adjust_subplots(left, bottom, right, top, wspace, hspace)
def adjust_subplots(left=0.02, bottom=0.02,
right=0.98, top=0.90,
wspace=0.1, hspace=0.15):
'''
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
    hspace = 0.2   # the amount of height reserved for blank space between subplots
'''
#print('[df2] adjust_subplots(%r)' % locals())
plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
#=======================
# TEXT FUNCTIONS
# TODO: I have too many of these. Need to consolidate
#=======================
def upperleft_text(txt):
txtargs = dict(horizontalalignment='left',
verticalalignment='top',
#fontsize='smaller',
#fontweight='ultralight',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE)
ax_relative_text(.02, .02, txt, **txtargs)
def upperright_text(txt, offset=None):
txtargs = dict(horizontalalignment='right',
verticalalignment='top',
#fontsize='smaller',
#fontweight='ultralight',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE,
offset=offset)
ax_relative_text(.98, .02, txt, **txtargs)
def lowerright_text(txt):
txtargs = dict(horizontalalignment='right',
verticalalignment='top',
#fontsize='smaller',
#fontweight='ultralight',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE)
ax_relative_text(.98, .92, txt, **txtargs)
def absolute_lbl(x_, y_, txt, roffset=(-.02, -.02), **kwargs):
txtargs = dict(horizontalalignment='right',
verticalalignment='top',
backgroundcolor=(0, 0, 0, .5),
color=ORANGE,
**kwargs)
ax_absolute_text(x_, y_, txt, roffset=roffset, **txtargs)
def ax_relative_text(x, y, txt, ax=None, offset=None, **kwargs):
if ax is None:
ax = gca()
xy, width, height = _axis_xy_width_height(ax)
x_, y_ = ((xy[0]) + x * width, (xy[1] + height) - y * height)
if offset is not None:
xoff, yoff = offset
x_ += xoff
y_ += yoff
ax_absolute_text(x_, y_, txt, ax=ax, **kwargs)
def ax_absolute_text(x_, y_, txt, ax=None, roffset=None, **kwargs):
if ax is None:
ax = gca()
if 'fontproperties' in kwargs:
kwargs['fontproperties'] = FONTS.relative
if roffset is not None:
xroff, yroff = roffset
xy, width, height = _axis_xy_width_height(ax)
x_ += xroff * width
y_ += yroff * height
ax.text(x_, y_, txt, **kwargs)
def fig_relative_text(x, y, txt, **kwargs):
kwargs['horizontalalignment'] = 'center'
kwargs['verticalalignment'] = 'center'
fig = gcf()
#xy, width, height = _axis_xy_width_height(ax)
#x_, y_ = ((xy[0]+width)+x*width, (xy[1]+height)-y*height)
fig.text(x, y, txt, **kwargs)
def draw_text(text_str, rgb_textFG=(0, 0, 0), rgb_textBG=(1, 1, 1)):
ax = gca()
xy, width, height = _axis_xy_width_height(ax)
text_x = xy[0] + (width / 2)
text_y = xy[1] + (height / 2)
ax.text(text_x, text_y, text_str,
horizontalalignment='center',
verticalalignment='center',
color=rgb_textFG,
backgroundcolor=rgb_textBG)
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True):
if figtitle is None:
figtitle = ''
fig = gcf()
if incanvas:
if subtitle != '':
subtitle = '\n' + subtitle
fig.suptitle(figtitle + subtitle, fontsize=14, fontweight='bold')
#fig.suptitle(figtitle, x=.5, y=.98, fontproperties=FONTS.figtitle)
#fig_relative_text(.5, .96, subtitle, fontproperties=FONTS.subtitle)
else:
fig.suptitle('')
window_figtitle = ('fig(%d) ' % fig.number) + figtitle
fig.canvas.set_window_title(window_figtitle)
def convert_keypress_event_mpl_to_qt4(mevent):
global TMP_mevent
TMP_mevent = mevent
# Grab the key from the mpl.KeyPressEvent
key = mevent.key
print('[df2] convert event mpl -> qt4')
print('[df2] key=%r' % key)
# dicts modified from backend_qt4.py
mpl2qtkey = {'control': Qt.Key_Control, 'shift': Qt.Key_Shift,
'alt': Qt.Key_Alt, 'super': Qt.Key_Meta,
'enter': Qt.Key_Return, 'left': Qt.Key_Left, 'up': Qt.Key_Up,
'right': Qt.Key_Right, 'down': Qt.Key_Down,
'escape': Qt.Key_Escape, 'f1': Qt.Key_F1, 'f2': Qt.Key_F2,
'f3': Qt.Key_F3, 'f4': Qt.Key_F4, 'f5': Qt.Key_F5,
'f6': Qt.Key_F6, 'f7': Qt.Key_F7, 'f8': Qt.Key_F8,
'f9': Qt.Key_F9, 'f10': Qt.Key_F10, 'f11': Qt.Key_F11,
'f12': Qt.Key_F12, 'home': Qt.Key_Home, 'end': Qt.Key_End,
'pageup': Qt.Key_PageUp, 'pagedown': Qt.Key_PageDown}
# Reverse the control and super (aka cmd/apple) keys on OSX
if sys.platform == 'darwin':
mpl2qtkey.update({'super': Qt.Key_Control, 'control': Qt.Key_Meta, })
# Try to reconstruct QtGui.KeyEvent
type_ = QtCore.QEvent.Type(QtCore.QEvent.KeyPress) # The type should always be KeyPress
text = ''
# Try to extract the original modifiers
modifiers = QtCore.Qt.NoModifier # initialize to no modifiers
if key.find(u'ctrl+') >= 0:
modifiers = modifiers | QtCore.Qt.ControlModifier
key = key.replace(u'ctrl+', u'')
print('[df2] has ctrl modifier')
text += 'Ctrl+'
if key.find(u'alt+') >= 0:
modifiers = modifiers | QtCore.Qt.AltModifier
key = key.replace(u'alt+', u'')
print('[df2] has alt modifier')
text += 'Alt+'
if key.find(u'super+') >= 0:
modifiers = modifiers | QtCore.Qt.MetaModifier
key = key.replace(u'super+', u'')
print('[df2] has super modifier')
text += 'Super+'
if key.isupper():
modifiers = modifiers | QtCore.Qt.ShiftModifier
print('[df2] has shift modifier')
text += 'Shift+'
# Try to extract the original key
try:
if key in mpl2qtkey:
key_ = mpl2qtkey[key]
else:
key_ = ord(key.upper()) # Qt works with uppercase keys
text += key.upper()
except Exception as ex:
print('[df2] ERROR key=%r' % key)
print('[df2] ERROR %r' % ex)
raise
autorep = False # default false
count = 1 # default 1
text = QtCore.QString(text) # The text is somewhat arbitrary
# Create the QEvent
print('----------------')
print('[df2] Create event')
print('[df2] type_ = %r' % type_)
print('[df2] text = %r' % text)
print('[df2] modifiers = %r' % modifiers)
print('[df2] autorep = %r' % autorep)
print('[df2] count = %r ' % count)
print('----------------')
qevent = QtGui.QKeyEvent(type_, key_, modifiers, text, autorep, count)
return qevent
def test_build_qkeyevent():
import draw_func2 as df2
qtwin = df2.QT4_WINS[0]
# This reconstructs an test mplevent
canvas = df2.figure(1).canvas
mevent = matplotlib.backend_bases.KeyEvent('key_press_event', canvas, u'ctrl+p', x=672, y=230.0)
qevent = df2.convert_keypress_event_mpl_to_qt4(mevent)
app = qtwin.backend.app
app.sendEvent(qtwin.ui, mevent)
#type_ = QtCore.QEvent.Type(QtCore.QEvent.KeyPress) # The type should always be KeyPress
#text = QtCore.QString('A') # The text is somewhat arbitrary
#modifiers = QtCore.Qt.NoModifier # initialize to no modifiers
#modifiers = modifiers | QtCore.Qt.ControlModifier
#modifiers = modifiers | QtCore.Qt.AltModifier
#key_ = ord('A') # Qt works with uppercase keys
#autorep = False # default false
#count = 1 # default 1
#qevent = QtGui.QKeyEvent(type_, key_, modifiers, text, autorep, count)
return qevent
# This actually doesn't matter
def on_key_press_event(event):
'redirects keypress events to main window'
global QT4_WINS
print('[df2] %r' % event)
print('[df2] %r' % str(event.__dict__))
for qtwin in QT4_WINS:
qevent = convert_keypress_event_mpl_to_qt4(event)
app = qtwin.backend.app
print('[df2] attempting to send qevent to qtwin')
app.sendEvent(qtwin, qevent)
# TODO: FINISH ME
#PyQt4.QtGui.QKeyEvent
#qtwin.keyPressEvent(event)
#fig.canvas.manager.window.keyPressEvent()
def customize_figure(fig, docla):
if not 'user_stat_list' in fig.__dict__.keys() or docla:
fig.user_stat_list = []
fig.user_notes = []
# We dont need to catch keypress events because you just need to set it as
# an application level shortcut
# Catch key press events
#key_event_cbid = fig.__dict__.get('key_event_cbid', None)
#if key_event_cbid is not None:
#fig.canvas.mpl_disconnect(key_event_cbid)
#fig.key_event_cbid = fig.canvas.mpl_connect('key_press_event', on_key_press_event)
fig.df2_closed = False
def gcf():
if plotWidget is not None:
#print('is plotwidget visible = %r' % plotWidget.isVisible())
fig = plotWidget.figure
return fig
return plt.gcf()
def gca():
if plotWidget is not None:
#print('is plotwidget visible = %r' % plotWidget.isVisible())
axes_list = plotWidget.figure.get_axes()
current = 0
ax = axes_list[current]
return ax
return plt.gca()
def cla():
return plt.cla()
def clf():
return plt.clf()
def get_fig(fnum=None):
printDBG('[df2] get_fig(fnum=%r)' % fnum)
fig_kwargs = dict(figsize=FIGSIZE, dpi=DPI)
if plotWidget is not None:
return gcf()
if fnum is None:
try:
fig = gcf()
except Exception as ex:
printDBG('[df2] get_fig(): ex=%r' % ex)
fig = plt.figure(**fig_kwargs)
fnum = fig.number
else:
try:
fig = plt.figure(fnum, **fig_kwargs)
except Exception as ex:
print(repr(ex))
warnings.warn(repr(ex))
fig = gcf()
return fig
def get_ax(fnum=None, pnum=None):
figure(fnum=fnum, pnum=pnum)
ax = gca()
return ax
def figure(fnum=None, docla=False, title=None, pnum=(1, 1, 1), figtitle=None,
doclf=False, **kwargs):
'''
fnum = fignum = figure number
pnum = plotnum = plot tuple
'''
#matplotlib.pyplot.xkcd()
fig = get_fig(fnum)
axes_list = fig.get_axes()
# Ensure my customized settings
customize_figure(fig, docla)
# Convert pnum to tuple format
if tools.is_int(pnum):
nr = pnum // 100
nc = pnum // 10 - (nr * 10)
px = pnum - (nr * 100) - (nc * 10)
pnum = (nr, nc, px)
if doclf: # a bit hacky. Need to rectify docla and doclf
fig.clf()
# Get the subplot
if docla or len(axes_list) == 0:
printDBG('[df2] *** NEW FIGURE %r.%r ***' % (fnum, pnum))
if not pnum is None:
#ax = plt.subplot(*pnum)
ax = fig.add_subplot(*pnum)
ax.cla()
else:
ax = gca()
else:
printDBG('[df2] *** OLD FIGURE %r.%r ***' % (fnum, pnum))
if not pnum is None:
ax = plt.subplot(*pnum) # fig.add_subplot fails here
#ax = fig.add_subplot(*pnum)
else:
ax = gca()
#ax = axes_list[0]
# Set the title
if not title is None:
ax = gca()
ax.set_title(title, fontproperties=FONTS.axtitle)
# Add title to figure
if figtitle is None and pnum == (1, 1, 1):
figtitle = title
if not figtitle is None:
set_figtitle(figtitle, incanvas=False)
return fig
def plot_pdf(data, draw_support=True, scale_to=None, label=None, color=0,
nYTicks=3):
fig = gcf()
ax = gca()
data = np.array(data)
if len(data) == 0:
warnstr = '[df2] ! Warning: len(data) = 0. Cannot visualize pdf'
warnings.warn(warnstr)
draw_text(warnstr)
return
bw_factor = .05
if isinstance(color, (int, float)):
colorx = color
line_color = plt.get_cmap('gist_rainbow')(colorx)
else:
line_color = color
# Estimate a pdf
data_pdf = estimate_pdf(data, bw_factor)
# Get probability of seen data
prob_x = data_pdf(data)
# Get probability of unseen data data
x_data = np.linspace(0, data.max(), 500)
y_data = data_pdf(x_data)
# Scale if requested
if not scale_to is None:
scale_factor = scale_to / y_data.max()
y_data *= scale_factor
prob_x *= scale_factor
#Plot the actual datas on near the bottom perterbed in Y
if draw_support:
pdfrange = prob_x.max() - prob_x.min()
perb = (np.random.randn(len(data))) * pdfrange / 30.
preb_y_data = np.abs([pdfrange / 50. for _ in data] + perb)
ax.plot(data, preb_y_data, 'o', color=line_color, figure=fig, alpha=.1)
# Plot the pdf (unseen data)
ax.plot(x_data, y_data, color=line_color, label=label)
if nYTicks is not None:
yticks = np.linspace(min(y_data), max(y_data), nYTicks)
ax.set_yticks(yticks)
def estimate_pdf(data, bw_factor):
try:
data_pdf = scipy.stats.gaussian_kde(data, bw_factor)
data_pdf.covariance_factor = bw_factor
except Exception as ex:
print('[df2] ! Exception while estimating kernel density')
print('[df2] data=%r' % (data,))
print('[df2] ex=%r' % (ex,))
raise
return data_pdf
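# Editor's example (sketch): the returned gaussian_kde object is callable, so
# the estimated pdf can be evaluated on a fresh grid exactly as plot_pdf does.
def _estimate_pdf_example():
    data = np.random.randn(200)
    data_pdf = estimate_pdf(data, bw_factor=.05)
    x_data = np.linspace(-3, 3, 50)
    return data_pdf(x_data)  # estimated densities at x_data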
def show_histogram(data, bins=None, **kwargs):
print('[df2] show_histogram()')
dmin = int(np.floor(data.min()))
dmax = int(np.ceil(data.max()))
if bins is None:
bins = dmax - dmin
fig = figure(**kwargs)
ax = gca()
ax.hist(data, bins=bins, range=(dmin, dmax))
#help(np.bincount)
fig.show()
def show_signature(sig, **kwargs):
fig = figure(**kwargs)
plt.plot(sig)
fig.show()
def plot_stems(x_data=None, y_data=None):
if y_data is not None and x_data is None:
x_data = np.arange(len(y_data))
pass
if len(x_data) != len(y_data):
print('[df2] WARNING plot_stems(): len(x_data)!=len(y_data)')
if len(x_data) == 0:
print('[df2] WARNING plot_stems(): len(x_data)=len(y_data)=0')
x_data_ = np.array(x_data)
y_data_ = np.array(y_data)
x_data_sort = x_data_[y_data_.argsort()[::-1]]
y_data_sort = y_data_[y_data_.argsort()[::-1]]
markerline, stemlines, baseline = pylab.stem(x_data_sort, y_data_sort, linefmt='-')
pylab.setp(markerline, 'markerfacecolor', 'b')
pylab.setp(baseline, 'linewidth', 0)
ax = gca()
ax.set_xlim(min(x_data) - 1, max(x_data) + 1)
ax.set_ylim(min(y_data) - 1, max(max(y_data), max(x_data)) + 1)
def plot_sift_signature(sift, title='', fnum=None, pnum=None):
figure(fnum=fnum, pnum=pnum)
ax = gca()
plot_bars(sift, 16)
ax.set_xlim(0, 128)
ax.set_ylim(0, 256)
space_xticks(9, 16)
space_yticks(5, 64)
ax.set_title(title)
dark_background(ax)
return ax
def dark_background(ax=None, doubleit=False):
if ax is None:
ax = gca()
xy, width, height = _axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= (doubleit + 1)
height *= (doubleit + 1)
rect = matplotlib.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(BLACK * .9)
rect = ax.add_patch(rect)
def space_xticks(nTicks=9, spacing=16, ax=None):
if ax is None:
ax = gca()
ax.set_xticks(np.arange(nTicks) * spacing)
small_xticks(ax)
def space_yticks(nTicks=9, spacing=32, ax=None):
if ax is None:
ax = gca()
ax.set_yticks(np.arange(nTicks) * spacing)
small_yticks(ax)
def small_xticks(ax=None):
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
def small_yticks(ax=None):
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
def plot_bars(y_data, nColorSplits=1):
width = 1
nDims = len(y_data)
nGroup = nDims // nColorSplits
ori_colors = distinct_colors(nColorSplits)
x_data = np.arange(nDims)
ax = gca()
for ix in xrange(nColorSplits):
xs = np.arange(nGroup) + (nGroup * ix)
color = ori_colors[ix]
x_dat = x_data[xs]
y_dat = y_data[xs]
ax.bar(x_dat, y_dat, width, color=color, edgecolor=np.array(color) * .8)
def phantom_legend_label(label, color, loc='upper right'):
'adds a legend label without displaying an actor'
pass
#phantom_actor = plt.Circle((0, 0), 1, fc=color, prop=FONTS.legend, loc=loc)
#plt.legend(phant_actor, label, framealpha=.2)
#plt.legend(*zip(*legend_tups), framealpha=.2)
#legend_tups = []
#legend_tups.append((phantom_actor, label))
def legend(loc='upper right'):
ax = gca()
ax.legend(prop=FONTS.legend, loc=loc)
def plot_histpdf(data, label=None, draw_support=False, nbins=10):
freq, _ = plot_hist(data, nbins=nbins)
plot_pdf(data, draw_support=draw_support, scale_to=freq.max(), label=label)
def plot_hist(data, bins=None, nbins=10, weights=None):
if isinstance(data, list):
data = np.array(data)
if bins is None:
dmin = data.min()
dmax = data.max()
bins = dmax - dmin
ax = gca()
freq, bins_, patches = ax.hist(data, bins=nbins, weights=weights, range=(dmin, dmax))
return freq, bins_
def variation_trunctate(data):
ax = gca()
data = np.array(data)
if len(data) == 0:
warnstr = '[df2] ! Warning: len(data) = 0. Cannot variation_truncate'
warnings.warn(warnstr)
return
trunc_max = data.mean() + data.std() * 2
trunc_min = np.floor(data.min())
ax.set_xlim(trunc_min, trunc_max)
#trunc_xticks = np.linspace(0, int(trunc_max),11)
#trunc_xticks = trunc_xticks[trunc_xticks >= trunc_min]
#trunc_xticks = np.append([int(trunc_min)], trunc_xticks)
#no_zero_yticks = ax.get_yticks()[ax.get_yticks() > 0]
#ax.set_xticks(trunc_xticks)
#ax.set_yticks(no_zero_yticks)
#_----------------- HELPERS ^^^ ---------
# ---- IMAGE CREATION FUNCTIONS ----
@tools.debug_exception
def draw_sift(desc, kp=None):
# TODO: There might be a divide by zero warning in here.
''' desc = np.random.rand(128)
desc = desc / np.sqrt((desc**2).sum())
desc = np.round(desc * 255) '''
# This is draw, because it is an overlay
ax = gca()
tau = 2 * np.pi
DSCALE = .25
XYSCALE = .5
XYSHIFT = -.75
ORI_SHIFT = 0 # -tau #1/8 * tau
# SIFT CONSTANTS
NORIENTS = 8
NX = 4
NY = 4
NBINS = NX * NY
def cirlce_rad2xy(radians, mag):
return np.cos(radians) * mag, np.sin(radians) * mag
discrete_ori = (np.arange(0, NORIENTS) * (tau / NORIENTS) + ORI_SHIFT)
# Build list of plot positions
# Build an "arm" for each sift measurement
arm_mag = desc / 255.0
arm_ori = np.tile(discrete_ori, (NBINS, 1)).flatten()
# The offset x,y's for each sift measurment
arm_dxy = np.array(zip(*cirlce_rad2xy(arm_ori, arm_mag)))
yxt_gen = itertools.product(xrange(NY), xrange(NX), xrange(NORIENTS))
yx_gen = itertools.product(xrange(NY), xrange(NX))
# Transform the drawing of the SIFT descriptor to the its elliptical patch
axTrans = ax.transData
kpTrans = None
if kp is None:
kp = [0, 0, 1, 0, 1]
kp = np.array(kp)
kpT = kp.T
x, y, a, c, d = kpT[:, 0]
kpTrans = Affine2D([( a, 0, x),
( c, d, y),
( 0, 0, 1)])
axTrans = ax.transData
# Draw 8 directional arms in each of the 4x4 grid cells
arrow_patches = []
arrow_patches2 = []
for y, x, t in yxt_gen:
index = y * NX * NORIENTS + x * NORIENTS + t
(dx, dy) = arm_dxy[index]
arw_x = x * XYSCALE + XYSHIFT
arw_y = y * XYSCALE + XYSHIFT
arw_dy = dy * DSCALE * 1.5 # scale for viz Hack
arw_dx = dx * DSCALE * 1.5
#posA = (arw_x, arw_y)
#posB = (arw_x+arw_dx, arw_y+arw_dy)
_args = [arw_x, arw_y, arw_dx, arw_dy]
_kwargs = dict(head_width=.0001, transform=kpTrans, length_includes_head=False)
arrow_patches += [FancyArrow(*_args, **_kwargs)]
arrow_patches2 += [FancyArrow(*_args, **_kwargs)]
# Draw circles around each of the 4x4 grid cells
circle_patches = []
for y, x in yx_gen:
circ_xy = (x * XYSCALE + XYSHIFT, y * XYSCALE + XYSHIFT)
circ_radius = DSCALE
circle_patches += [Circle(circ_xy, circ_radius, transform=kpTrans)]
# Efficiently draw many patches with PatchCollections
circ_collection = PatchCollection(circle_patches)
circ_collection.set_facecolor('none')
circ_collection.set_transform(axTrans)
circ_collection.set_edgecolor(BLACK)
circ_collection.set_alpha(.5)
# Body of arrows
arw_collection = PatchCollection(arrow_patches)
arw_collection.set_transform(axTrans)
arw_collection.set_linewidth(.5)
arw_collection.set_color(RED)
arw_collection.set_alpha(1)
# Border of arrows
arw_collection2 = matplotlib.collections.PatchCollection(arrow_patches2)
arw_collection2.set_transform(axTrans)
arw_collection2.set_linewidth(1)
arw_collection2.set_color(BLACK)
arw_collection2.set_alpha(1)
# Add artists to axes
ax.add_collection(circ_collection)
ax.add_collection(arw_collection2)
ax.add_collection(arw_collection)
def feat_scores_to_color(fs, cmap_='hot'):
assert len(fs.shape) == 1, 'score must be 1d'
cmap = plt.get_cmap(cmap_)
mins = fs.min()
rnge = fs.max() - mins
if rnge == 0:
return [cmap(.5) for fx in xrange(len(fs))]
score2_01 = lambda score: .1 + .9 * (float(score) - mins) / (rnge)
colors = [cmap(score2_01(score)) for score in fs]
return colors
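# Editor's example (sketch): scores are rescaled to [.1, 1.0] before the
# colormap lookup, so the lowest score maps to cmap(.1) and the highest to
# cmap(1.0); a constant score vector falls back to cmap(.5) for every match.
def _feat_scores_to_color_example():
    fs = np.array([1.0, 2.0, 3.0])
    return feat_scores_to_color(fs)  # three RGBA tuples from the 'hot' cmap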
def colorbar(scalars, colors):
'adds a color bar next to the axes'
orientation = ['vertical', 'horizontal'][0]
TICK_FONTSIZE = 8
# Put colors and scalars in correct order
sorted_scalars = sorted(scalars)
sorted_colors = [x for (y, x) in sorted(zip(scalars, colors))]
# Make a listed colormap and mappable object
listed_cmap = mpl.colors.ListedColormap(sorted_colors)
sm = plt.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(sorted_scalars)
# Use mapable object to create the colorbar
cb = plt.colorbar(sm, orientation=orientation)
# Add the colorbar to the correct label
axis = cb.ax.xaxis if orientation == 'horizontal' else cb.ax.yaxis
position = 'bottom' if orientation == 'horizontal' else 'right'
axis.set_ticks_position(position)
axis.set_ticks([0, .5, 1])
cb.ax.tick_params(labelsize=TICK_FONTSIZE)
def draw_lines2(kpts1, kpts2, fm=None, fs=None, kpts2_offset=(0, 0),
color_list=None, **kwargs):
if not DISTINCT_COLORS:
color_list = None
# input data
if not SHOW_LINES:
return
if fm is None: # assume kpts are in director correspondence
assert kpts1.shape == kpts2.shape
if len(fm) == 0:
return
ax = gca()
woff, hoff = kpts2_offset
# Draw line collection
kpts1_m = kpts1[fm[:, 0]].T
kpts2_m = kpts2[fm[:, 1]].T
xxyy_iter = iter(zip(kpts1_m[0],
kpts2_m[0] + woff,
kpts1_m[1],
kpts2_m[1] + hoff))
if color_list is None:
if fs is None: # Draw with solid color
color_list = [ LINE_COLOR for fx in xrange(len(fm))]
else: # Draw with colors proportional to score difference
color_list = feat_scores_to_color(fs)
segments = [((x1, y1), (x2, y2)) for (x1, x2, y1, y2) in xxyy_iter]
linewidth = [LINE_WIDTH for fx in xrange(len(fm))]
line_alpha = LINE_ALPHA
if LINE_ALPHA_OVERRIDE is not None:
line_alpha = LINE_ALPHA_OVERRIDE
line_group = LineCollection(segments, linewidth, color_list, alpha=line_alpha)
#plt.colorbar(line_group, ax=ax)
ax.add_collection(line_group)
#figure(100)
#plt.hexbin(x,y, cmap=plt.cm.YlOrRd_r)
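# Editor's example (sketch): fm is an (N, 2) array of (index1, index2) pairs;
# each pair becomes one line segment from kpts1[i1] to kpts2[i2], shifted by
# kpts2_offset when the second chip is drawn beside the first.
def _draw_lines2_example():
    kpts1 = np.array([[10., 10., 5., 0., 5.]])
    kpts2 = np.array([[40., 15., 5., 0., 5.]])
    fm = np.array([[0, 0]])
    draw_lines2(kpts1, kpts2, fm, kpts2_offset=(100, 0))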
def draw_kpts(kpts, *args, **kwargs):
draw_kpts2(kpts, *args, **kwargs)
def draw_kpts2(kpts, offset=(0, 0), ell=SHOW_ELLS, pts=False, pts_color=ORANGE,
pts_size=POINT_SIZE, ell_alpha=ELL_ALPHA,
ell_linewidth=ELL_LINEWIDTH, ell_color=ELL_COLOR,
color_list=None, rect=None, arrow=False, **kwargs):
if not DISTINCT_COLORS:
color_list = None
printDBG('drawkpts2: Drawing Keypoints! ell=%r pts=%r' % (ell, pts))
# get matplotlib info
ax = gca()
pltTrans = ax.transData
ell_actors = []
# data
kpts = np.array(kpts)
kptsT = kpts.T
x = kptsT[0, :] + offset[0]
y = kptsT[1, :] + offset[1]
printDBG('[df2] draw_kpts()----------')
printDBG('[df2] draw_kpts() ell=%r pts=%r' % (ell, pts))
printDBG('[df2] draw_kpts() drawing kpts.shape=%r' % (kpts.shape,))
if rect is None:
rect = ell
rect = False
if pts is True:
rect = False
if ell or rect:
printDBG('[df2] draw_kpts() drawing ell kptsT.shape=%r' % (kptsT.shape,))
# We have the transformation from unit circle to ellipse here. (inv(A))
a = kptsT[2]
b = np.zeros(len(a))
c = kptsT[3]
d = kptsT[4]
kpts_iter = izip(x, y, a, b, c, d)
aff_list = [Affine2D([( a_, b_, x_),
( c_, d_, y_),
( 0, 0, 1)])
for (x_, y_, a_, b_, c_, d_) in kpts_iter]
patch_list = []
ell_actors = [Circle( (0, 0), 1, transform=aff) for aff in aff_list]
if ell:
patch_list += ell_actors
if rect:
rect_actors = [Rectangle( (-1, -1), 2, 2, transform=aff) for aff in aff_list]
patch_list += rect_actors
if arrow:
_kwargs = dict(head_width=.01, length_includes_head=False)
arrow_actors1 = [FancyArrow(0, 0, 0, 1, transform=aff, **_kwargs) for aff in aff_list]
arrow_actors2 = [FancyArrow(0, 0, 1, 0, transform=aff, **_kwargs) for aff in aff_list]
patch_list += arrow_actors1
patch_list += arrow_actors2
ellipse_collection = matplotlib.collections.PatchCollection(patch_list)
ellipse_collection.set_facecolor('none')
ellipse_collection.set_transform(pltTrans)
if ELL_ALPHA_OVERRIDE is not None:
ell_alpha = ELL_ALPHA_OVERRIDE
ellipse_collection.set_alpha(ell_alpha)
ellipse_collection.set_linewidth(ell_linewidth)
if not color_list is None:
ell_color = color_list
if ell_color == 'distinct':
ell_color = distinct_colors(len(kpts))
ellipse_collection.set_edgecolor(ell_color)
ax.add_collection(ellipse_collection)
if pts:
printDBG('[df2] draw_kpts() drawing pts x.shape=%r y.shape=%r' % (x.shape, y.shape))
if color_list is None:
color_list = [pts_color for _ in xrange(len(x))]
ax.autoscale(enable=False)
ax.scatter(x, y, c=color_list, s=2 * pts_size, marker='o', edgecolor='none')
#ax.autoscale(enable=False)
#ax.plot(x, y, linestyle='None', marker='o', markerfacecolor=pts_color, markersize=pts_size, markeredgewidth=0)
# ---- CHIP DISPLAY COMMANDS ----
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
interpolation='nearest', **kwargs):
'other interpolations = nearest, bicubic, bilinear'
#printDBG('[df2] ----- IMSHOW ------ ')
#printDBG('[***df2.imshow] fnum=%r pnum=%r title=%r *** ' % (fnum, pnum, title))
#printDBG('[***df2.imshow] img.shape = %r ' % (img.shape,))
#printDBG('[***df2.imshow] img.stats = %r ' % (helpers.printable_mystats(img),))
fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
ax = gca()
    if DARKEN is not None:
imgdtype = img.dtype
img = np.array(img, dtype=float) * DARKEN
img = np.array(img, dtype=imgdtype)
plt_imshow_kwargs = {
'interpolation': interpolation,
#'cmap': plt.get_cmap('gray'),
'vmin': 0,
'vmax': 255,
}
try:
if len(img.shape) == 3 and img.shape[2] == 3:
# img is in a color format
imgBGR = img
if imgBGR.dtype == np.float64:
if imgBGR.max() <= 1:
imgBGR = np.array(imgBGR, dtype=np.float32)
else:
imgBGR = np.array(imgBGR, dtype=np.uint8)
imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
ax.imshow(imgRGB, **plt_imshow_kwargs)
elif len(img.shape) == 2:
# img is in grayscale
imgGRAY = img
ax.imshow(imgGRAY, cmap=plt.get_cmap('gray'), **plt_imshow_kwargs)
else:
raise Exception('unknown image format')
except TypeError as te:
print('[df2] imshow ERROR %r' % te)
raise
except Exception as ex:
print('[df2] img.dtype = %r' % (img.dtype,))
print('[df2] type(img) = %r' % (type(img),))
print('[df2] img.shape = %r' % (img.shape,))
print('[df2] imshow ERROR %r' % ex)
raise
#plt.set_cmap('gray')
ax.set_xticks([])
ax.set_yticks([])
#ax.set_autoscale(False)
#try:
#if pnum == 111:
#fig.tight_layout()
#except Exception as ex:
#print('[df2] !! Exception durring fig.tight_layout: '+repr(ex))
#raise
return fig, ax
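# Illustrative call (the image array is a placeholder, not from the original code):
# given a BGR or grayscale numpy array `img`,
#   fig, ax = imshow(img, fnum=1, title='chip')
# displays it with nearest-neighbor interpolation and no axis ticks.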
def get_num_channels(img):
ndims = len(img.shape)
if ndims == 2:
nChannels = 1
elif ndims == 3 and img.shape[2] == 3:
nChannels = 3
elif ndims == 3 and img.shape[2] == 1:
nChannels = 1
else:
raise Exception('Cannot determine number of channels')
return nChannels
def stack_images(img1, img2, vert=None):
nChannels = get_num_channels(img1)
nChannels2 = get_num_channels(img2)
assert nChannels == nChannels2
(h1, w1) = img1.shape[0: 2] # get chip dimensions
(h2, w2) = img2.shape[0: 2]
woff, hoff = 0, 0
vert_wh = max(w1, w2), h1 + h2
horiz_wh = w1 + w2, max(h1, h2)
if vert is None:
# Display the orientation with the better (closer to 1) aspect ratio
vert_ar = max(vert_wh) / min(vert_wh)
horiz_ar = max(horiz_wh) / min(horiz_wh)
vert = vert_ar < horiz_ar
if vert:
wB, hB = vert_wh
hoff = h1
else:
wB, hB = horiz_wh
woff = w1
    # concatenate images
if nChannels == 3:
imgB = np.zeros((hB, wB, 3), np.uint8)
imgB[0:h1, 0:w1, :] = img1
imgB[hoff:(hoff + h2), woff:(woff + w2), :] = img2
elif nChannels == 1:
imgB = np.zeros((hB, wB), np.uint8)
imgB[0:h1, 0:w1] = img1
imgB[hoff:(hoff + h2), woff:(woff + w2)] = img2
return imgB, woff, hoff
def show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=None, fs=None, title=None,
vert=None, fnum=None, pnum=None, **kwargs):
    '''Draws two chips and the feature matches between them. Feature matches
    are pairs of indexes into kpts1 and kpts2, which use the (x, y, a, c, d)
    keypoint format.
    '''
printDBG('[df2] draw_matches2() fnum=%r, pnum=%r' % (fnum, pnum))
# get matching keypoints + offset
(h1, w1) = rchip1.shape[0:2] # get chip (h, w) dimensions
(h2, w2) = rchip2.shape[0:2]
# Stack the compared chips
match_img, woff, hoff = stack_images(rchip1, rchip2, vert)
xywh1 = (0, 0, w1, h1)
xywh2 = (woff, hoff, w2, h2)
# Show the stacked chips
fig, ax = imshow(match_img, title=title, fnum=fnum, pnum=pnum)
    # Overlay feature match annotations
draw_fmatch(xywh1, xywh2, kpts1, kpts2, fm, fs, **kwargs)
return ax, xywh1, xywh2
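# Illustrative flow (variable names are placeholders): given two chips and their
# keypoints in (x, y, a, c, d) format plus matches fm of shape (nMatch, 2),
#   ax, xywh1, xywh2 = show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm, fs)
# stacks and draws the chips, then overlays the matches via draw_fmatch below.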
# draw feature match
def draw_fmatch(xywh1, xywh2, kpts1, kpts2, fm, fs=None, lbl1=None, lbl2=None,
fnum=None, pnum=None, rect=False, colorbar_=True, **kwargs):
'''Draws the matching features. This is draw because it is an overlay
xywh1 - location of rchip1 in the axes
xywh2 - location or rchip2 in the axes
'''
if fm is None:
assert kpts1.shape == kpts2.shape, 'shapes different or fm not none'
fm = np.tile(np.arange(0, len(kpts1)), (2, 1)).T
pts = kwargs.get('draw_pts', False)
ell = kwargs.get('draw_ell', True)
lines = kwargs.get('draw_lines', True)
ell_alpha = kwargs.get('ell_alpha', .4)
nMatch = len(fm)
#printDBG('[df2.draw_fnmatch] nMatch=%r' % nMatch)
x1, y1, w1, h1 = xywh1
x2, y2, w2, h2 = xywh2
offset2 = (x2, y2)
# Custom user label for chips 1 and 2
if lbl1 is not None:
absolute_lbl(x1 + w1, y1, lbl1)
if lbl2 is not None:
absolute_lbl(x2 + w2, y2, lbl2)
# Plot the number of matches
if kwargs.get('show_nMatches', False):
upperleft_text('#match=%d' % nMatch)
# Draw all keypoints in both chips as points
if kwargs.get('all_kpts', False):
all_args = dict(ell=False, pts=pts, pts_color=GREEN, pts_size=2,
ell_alpha=ell_alpha, rect=rect)
all_args.update(kwargs)
draw_kpts2(kpts1, **all_args)
draw_kpts2(kpts2, offset=offset2, **all_args)
# Draw Lines and Ellipses and Points oh my
if nMatch > 0:
colors = [kwargs['colors']] * nMatch if 'colors' in kwargs else distinct_colors(nMatch)
if fs is not None:
colors = feat_scores_to_color(fs, 'hot')
acols = add_alpha(colors)
# Helper functions
def _drawkpts(**_kwargs):
_kwargs.update(kwargs)
fxs1 = fm[:, 0]
fxs2 = fm[:, 1]
draw_kpts2(kpts1[fxs1], rect=rect, **_kwargs)
draw_kpts2(kpts2[fxs2], offset=offset2, rect=rect, **_kwargs)
def _drawlines(**_kwargs):
_kwargs.update(kwargs)
draw_lines2(kpts1, kpts2, fm, fs, kpts2_offset=offset2, **_kwargs)
# User helpers
if ell:
_drawkpts(pts=False, ell=True, color_list=colors)
if pts:
_drawkpts(pts_size=8, pts=True, ell=False, pts_color=BLACK)
_drawkpts(pts_size=6, pts=True, ell=False, color_list=acols)
if lines:
_drawlines(color_list=colors)
else:
draw_boxedX(xywh2)
if fs is not None and colorbar_ and 'colors' in vars() and colors is not None:
colorbar(fs, colors)
#legend()
return None
def draw_boxedX(xywh, color=RED, lw=2, alpha=.5, theta=0):
'draws a big red x. redx'
ax = gca()
x1, y1, w, h = xywh
x2, y2 = x1 + w, y1 + h
segments = [((x1, y1), (x2, y2)),
((x1, y2), (x2, y1))]
trans = Affine2D()
trans.rotate(theta)
trans = trans + ax.transData
width_list = [lw] * len(segments)
color_list = [color] * len(segments)
line_group = LineCollection(segments, width_list, color_list, alpha=alpha,
transOffset=trans)
ax.add_collection(line_group)
def disconnect_callback(fig, callback_type, **kwargs):
#print('[df2] disconnect %r callback' % callback_type)
axes = kwargs.get('axes', [])
for ax in axes:
ax._hs_viewtype = ''
cbid_type = callback_type + '_cbid'
cbfn_type = callback_type + '_func'
cbid = fig.__dict__.get(cbid_type, None)
cbfn = fig.__dict__.get(cbfn_type, None)
if cbid is not None:
fig.canvas.mpl_disconnect(cbid)
else:
cbfn = None
fig.__dict__[cbid_type] = None
return cbid, cbfn
def connect_callback(fig, callback_type, callback_fn):
#print('[df2] register %r callback' % callback_type)
if callback_fn is None:
return
cbid_type = callback_type + '_cbid'
cbfn_type = callback_type + '_func'
fig.__dict__[cbid_type] = fig.canvas.mpl_connect(callback_type, callback_fn)
fig.__dict__[cbfn_type] = callback_fn
| apache-2.0 | -1,678,968,028,501,710,800 | 31.697605 | 119 | 0.588353 | false |
silas/rock | rock/text.py | 1 | 1235 | from __future__ import unicode_literals
def _(text):
return text.strip('\n')
USAGE = _("""
Usage: rock [--help] [--env=ENV] [--path=PATH] [--runtime=RUNTIME] command
""")
HELP = _("""
--help show help message
--verbose show script while running
--dry-run show script without running
--version show version
project:
--env=ENV set env
--path=PATH set path
--runtime=RUNTIME set runtime
commands:
build run build
test run tests
run run in environment
clean clean project files
other commands:
config show project configuration
env show evaluable environment variables
init generates project skeleton
runtime show installed runtimes
""")
CONFIG_USAGE = _("""
Usage: rock config [--format=FORMAT]
""")
CONFIG_HELP = _("""
--help show help message
--format set output format (json, yaml)
""")
ENV_USAGE = _("""
Usage: rock env
""")
ENV_HELP = _("""
--help show help message
""")
RUNTIME_USAGE = _("""
Usage: rock runtime
""")
RUNTIME_HELP = _("""
--help show help message
""")
| mit | 5,519,456,527,590,050,000 | 20.293103 | 74 | 0.545749 | false |
setsid/yacron | yacron/time.py | 1 | 5052 | """
This file is part of yacron.
Copyright (C) 2016 Vadim Kuznetsov <vimusov@gmail.com>
yacron is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
yacron is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with yacron. If not, see <http://www.gnu.org/licenses/>.
"""
class CronTime(object):
"""
Parse and store scheduled time.
"""
def __init__(self, minutes, hours, weekdays):
"""
Parse and store the minutes, hours and weekdays values.
:param minutes: Minutes (str)
:param hours: Hours (str)
:param weekdays: Weekdays (str)
:raise ValueError if any of the values is invalid
"""
self._minutes = self._parse_value(0, minutes, 59)
self._hours = self._parse_value(0, hours, 23)
# slashes are unacceptable in weekdays value
self._weekdays = self._parse_value(1, weekdays, 7, slash_acceptable=False)
@property
def minutes(self):
return self._minutes
@property
def hours(self):
return self._hours
@property
def weekdays(self):
return self._weekdays
def _check_value_range(self, min_value, value, max_value):
"""
Check is value in range.
:param min_value: Minimal valid value
:param value: Value
:param max_value: Maximum valid value
:return True if the value is in range
:raise ValueError if the value is out of range
"""
if not (min_value <= value <= max_value):
raise ValueError("invalid value '{0:d}', must be in [{1:d}..{2:d}]".format(value, min_value, max_value))
return True
def _check_special_chars(self, value):
"""
Check special characters in the value:
        1) the value cannot contain more than one '*', '/' or '-' character;
        2) special characters cannot be mixed (only one kind is allowed, except ',');
:param value: Value.
:raise ValueError if any invalid sequence of special characters found in the value.
"""
all_count = value.count('*')
slash_count = value.count('/')
comma_count = value.count(',')
hyphen_count = value.count('-')
is_invalid = any((
all_count > 1,
slash_count > 1,
hyphen_count > 1,
all_count and (slash_count or comma_count or hyphen_count),
slash_count and (all_count or comma_count or hyphen_count),
comma_count and (all_count or slash_count or hyphen_count),
hyphen_count and (all_count or slash_count or comma_count),
))
if is_invalid:
raise ValueError("invalid format in value '{0:s}'".format(value))
def _parse_value(self, min_value, value, max_value, slash_acceptable=True):
"""
Parse and check a value.
:param min_value: Minimal valid value
:param value: Value
:param max_value: Maximum valid value
:param slash_acceptable: Slash is valid in the value
:return: List of values.
:raise ValueError if parsing failed
"""
self._check_special_chars(value)
if value == '*':
return list(range(min_value, max_value + 1))
if value.startswith('/'):
if not slash_acceptable:
raise ValueError("value '{0:s}' can not contains slash".format(value))
divisor = int(value[1:])
self._check_value_range(min_value, divisor, max_value)
return [n for n in range(min_value, max_value + 1) if n % divisor == 0]
if '-' in value:
start_value, stop_value = map(int, value.split('-'))
self._check_value_range(min_value, start_value, max_value)
self._check_value_range(min_value, stop_value, max_value)
if start_value >= stop_value:
raise ValueError("start value can not be greater or equal to stop value")
return list(range(start_value, stop_value + 1))
if ',' in value:
return [n for n in map(int, value.split(',')) if self._check_value_range(min_value, n, max_value)]
return [int(value)]
def check_time(self, cur_time):
"""
Compare parsed time and current time.
:param cur_time: Current time (datetime).
:return: True if current time matches with parser time and False otherwise
"""
return all((
cur_time.minute in self._minutes,
cur_time.hour in self._hours,
cur_time.isoweekday() in self._weekdays,
))
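# --- Illustrative usage sketch (not part of the original module) ---
# The schedule strings below are assumptions chosen only to demonstrate the API.
if __name__ == '__main__':
    from datetime import datetime
    # "every 15th minute, any hour, Monday through Friday"
    schedule = CronTime('/15', '*', '1-5')
    print(schedule.minutes)                    # [0, 15, 30, 45]
    print(schedule.check_time(datetime.now()))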
| gpl-3.0 | 7,801,523,714,869,108,000 | 35.345324 | 116 | 0.598773 | false |
daymer/xWIKI_Karma | Migration_to_xWiki/migration_sample.py | 1 | 1532 | from PythonConfluenceAPI import ConfluenceAPI
import Configuration
import CustomModules.SQL_Connector
from Configuration import MySQLConfig, MediaWIKIConfig
from Migration_to_xWiki.Users_association import Users
from CustomModules import Mechanics
from CustomModules.Mechanics import XWikiClient, MysqlConnector, MigrationAssistant
target_pool = 'Migration pool'
parent = 'Migration pool'
MySQLconfig_INSTANCE = MySQLConfig()
MysqlConnector_INSTANCE = MysqlConnector(MySQLconfig_INSTANCE)
SQLConfig = Configuration.SQLConfig()
xWikiConfig = Configuration.XWikiConfig(target_pool)
xWikiClient = XWikiClient(xWikiConfig.api_root, xWikiConfig.auth_user, xWikiConfig.auth_pass)
ConfluenceConfig_instance = Configuration.ConfluenceConfig()
confluenceAPI_instance = ConfluenceAPI(username=ConfluenceConfig_instance.USER, password=ConfluenceConfig_instance.PASS, uri_base=ConfluenceConfig_instance.ULR)
MediaWIKIConfig = MediaWIKIConfig()
Migrator = MigrationAssistant(ConfluenceConfig=ConfluenceConfig_instance, MediaWIKIConfig=MediaWIKIConfig, xWikiConfig=xWikiConfig)
UserList = Users()
SQLConnector_instance = CustomModules.SQL_Connector.SQLConnector(SQLConfig)
title = 'Hyper-V Basics'
platform = 'Confluence'
result = Mechanics.migrate_page(title, platform, target_pool, parent, MySQLconfig_INSTANCE,
MysqlConnector_INSTANCE, SQLConfig, SQLConnector_instance, ConfluenceConfig_instance,
MediaWIKIConfig, xWikiConfig, xWikiClient, Migrator, UserList)
print(result)
| apache-2.0 | 4,877,879,177,620,873,000 | 48.419355 | 160 | 0.81201 | false |
UCSC-iGEM-2016/taris_controller | taris_controller/taris_sensor.py | 1 | 9944 | #!/usr/bin/python
from __future__ import print_function
import io # used to create file streams
import fcntl # used to access I2C parameters like addresses
import sys
import time # used for sleep delay and timestamps
class Taris_Sensor():
''' This object holds all required interface data for the Atlas Scientific \
EZO pH and RTD sensors. Built off of the base library, with new functions \
added for calibration and additional testing. '''
def __init__(self, address, bus):
# open two file streams, one for reading and one for writing
# the specific I2C channel is selected with bus
# it is usually 1, except for older revisions where it's 0
# wb and rb indicate binary read and write
self.file_read = io.open("/dev/i2c-"+str(bus), "rb", buffering=0)
self.file_write = io.open("/dev/i2c-"+str(bus), "wb", buffering=0)
# initializes I2C to either a user specified or default address
self.set_i2c_address(address)
self.cal_timeout = 1.6 # timeout for calibrations
self.read_timeout = 1.0 # timeout for reads
self.short_timeout = 0.3 # timeout for regular commands
# Set if testing board
self.DEBUG = True
def set_i2c_address(self, addr):
'''Set the I2C communications to the slave specified by the address. \
The commands for I2C dev using the ioctl functions are specified in \
the i2c-dev.h file from i2c-tools'''
I2C_SLAVE = 0x703
fcntl.ioctl(self.file_read, I2C_SLAVE, addr)
fcntl.ioctl(self.file_write, I2C_SLAVE, addr)
def write(self, cmd):
'''Writes a command to the sensor.'''
# appends the null character and sends the string over I2C
cmd += "\00"
self.file_write.write(cmd)
def read(self, num_of_bytes=31,startbit=1):
'''Reads data from the sensor and parses the incoming response.'''
# reads a specified number of bytes from I2C, then parses and displays the result
res = self.file_read.read(num_of_bytes) # read from the board
response = filter(lambda x: x != '\x00', res) # remove the null characters to get the response
if ord(response[0]) == 1: # if the response isn't an error
# change MSB to 0 for all received characters except the first and get a list of characters
char_list = map(lambda x: chr(ord(x) & ~0x80), list(response[startbit:]))
# NOTE: having to change the MSB to 0 is a glitch in the raspberry pi, and you shouldn't have to do this!
return ''.join(char_list) # convert the char list to a string and returns it
else:
return "Error " + str(ord(response[0]))
def query(self, string, start=1):
'''For commands that require a write, a wait, and a response. For instance, \
calibration requires writing an initial CAL command, waiting 300ms, \
then checking for a pass/fail indicator message.'''
# write a command to the board, wait the correct timeout, and read the response
self.write(string)
# the read and calibration commands require a longer timeout
if string.upper().startswith("R"):
time.sleep(self.read_timeout)
elif string.upper().startswith("CAL"):
time.sleep(self.cal_timeout)
else:
time.sleep(self.short_timeout)
return self.read(startbit=start)
def verify(self):
'''Verifies that the sensor is connected, also returns firmware version.'''
device_ID = self.query("I")
if device_ID.startswith("?I"):
print("Connected sensor: " + str(device_ID)[3:])
else:
raw_input("EZO not connected: " + device_ID)
def close(self):
'''Closes the sensor's filestream, not usually required.'''
self.file_read.close()
self.file_write.close()
def getData(self):
'''Gets data from sensor reading as a float.'''
data = self.query("R")
return float(data)
def cal_wait(self, cal_time):
'''UI for waiting for pH sensor to stabilize during calibration'''
x=1
if self.DEBUG == True:
cal_time = 4
while x<cal_time:
if x==1:
sys.stdout.write("Please wait for sensor to stabilize:")
else:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
x+=1
print('\n')
def pH_calibrateSensor(self):
'''Performs pH sensor calibration using included buffers.'''
# Clear previous calibration data
print("Starting pH sensor calibration...")
q = self.query("Cal,clear", 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
# Midpoint calibration. This will also reset previous data.
raw_input("Please rinse probe. Press [Enter] when pH 7 buffer is loaded.")
self.cal_wait(60)
mid_pH = "7.00"
q = self.query("CAL,MID," + mid_pH, 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
# Lowpoint calibration
raw_input("Please rinse probe. Press [Enter] when pH 4 buffer is loaded.")
self.cal_wait(60)
low_pH = "4.00"
q = self.query("CAL,LOW," + low_pH, 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
# Highpoint calibration
raw_input("Please rinse probe. Press [Enter] when pH 10 buffer is loaded.")
self.cal_wait(60)
high_pH = "10.00"
q = self.query("CAL,HIGH," + high_pH, 0)
if str(ord(q)) != '1':
print("Calibration failed with response " + str(q))
time.sleep(2)
return False
q = str(self.query("Cal,?"))
# Check that 3-point calibration is complete, otherwise return ERROR
if q != "?CAL,3":
print("Three point calibration incomplete!" + str(q))
cal_response = raw_input("Enter 'R' to retry or Enter to exit.")
if cal_response == "R" or cal_response == "r":
self.pH_calibrateSensor()
else:
return False
print("Three point pH calibration complete!")
time.sleep(1)
return True
def temp_calibrateSensor(self):
'''Calibrates the temperature sensor. Requires an external thermometer.'''
print("Clearing previous temperature calibration.")
q = str(ord(self.query("Cal,clear\0x0d", 0)))
if q == "1":
cal_temp = raw_input("Enter room temperature\n>>")
self.cal_wait(5)
q = str(ord(self.query("Cal,"+str(cal_temp) + "\0x0d", 0)))
if q == "1":
q = str(self.query("Cal,?"))
if q == "?CAL,1":
print("One point temperature calibration complete!")
return True
elif q == "?CAL,0":
print("One point temperature calibration incomplete!")
cal_response = raw_input("Enter R to retry or Enter to exit.")
if cal_response == "R" or cal_response == "r":
self.temp_calibrateSensor()
else:
return False
else:
print("Error setting new calibration temperature: " + str(q))
time.sleep(1)
return False
else:
print("Could not set new calibration temperature: " + str(q))
time.sleep(1)
return False
else:
print("Could not clear RTD sensor: " + str(q))
time.sleep(1)
return False
return False
def pH_compensateTemp(self,temp):
'''Compensates the pH sensor for temperature, is used in conjunction with \
a reading from the RTD sensor.'''
comp_status = self.query("T," + str(temp),0)
if str(ord(comp_status)) != '1':
print("Temperature compensation failed!: ")
time.sleep(2)
return False
else:
comp_status = str(self.query("T,?"))
print("Temperature compensation set for: " + comp_status[3:] + u'\xb0' + "C")
time.sleep(2)
            return True
def lockProtocol(self,command):
'''Not currently working. Normally used for locking some of the \
internal parameters (e.g. baud rate for UART mode).'''
read_bytes = 9
print("1.\tDisconnect power to device and any signal wires.\n\
2.\tShort PRB to TX.\n\
3.\tTurn device on and wait for LED to change to blue.\n\
4.\tRemove short from PRB to TX, then restart device.\n\
5.\tConnect data lines to Raspberry Pi I2C pins.")
raw_input("Press Enter when this is complete.")
raw_input("Press Enter to prevent further changes to device configuration.")
command_message = "PLOCK," + str(command)
self.sensorQ(command_message)
time.sleep(0.3)
lock_status = self.sensorRead(read_bytes)
if lock_status == "?PLOCK,1":
print("Sensor settings locked.")
return_code = 1
elif lock_status == "?PLOCK,0":
print("Sensor settings unlocked.")
return_code = 0
else:
print("False locking sensor settings.")
return False
return return_code
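# --- Illustrative usage sketch (not part of the original driver) ---
# The I2C address (0x63 is the usual EZO pH default) and bus number are
# assumptions; check your own board's configuration before using them.
if __name__ == '__main__':
    ph_probe = Taris_Sensor(0x63, 1)   # EZO pH circuit on I2C bus 1
    ph_probe.verify()                  # prints firmware info if connected
    print("pH reading: %.2f" % ph_probe.getData())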
| gpl-3.0 | 2,327,405,068,719,227,000 | 38.776 | 117 | 0.559433 | false |
lezizi/A-Framework | python/local-source/source.py | 1 | 2324 | #!/usr/bin/env python
#
# Copyright (C) 2012 LeZiZi Studio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SourceHandler():
'''
Provides basic source handling.
Property:
source: source object
'''
from base import Source
def __init__(self, source=None):
if source is None:
self.source = self.Source()
else:
self.source = source
def append(self,action):
'''
Append an Action to current source.
Argument:
action: An Action.
Return:
            Boolean. True for success and False when action exists.
        '''
        if self.source.list.count(action) == 0:
            self.source.list.append(action)
            return(True)
        else:
            return(False)
def delete(self,act):
'''
Argument:
act: An Action OR a string of action key.
Return:
Boolean. True for success.
'''
        if self.source.list.count(act) != 0:
            del(self.source.list[self.source.list.index(act)])
return(True)
else:
return(False)
def join(self, source):
'''
        Copy actions from another source into the current source.
        '''
        for each in source:
            if self.source.list.count(each) == 0:
                self.source.list.append(each)
def match(self,ingroups=[],outgroups=[],implementation=None,key=None):
### NOT YET IMP ##
pass
def test():
from base import Action
b = Action()
b.key = "1"
c = Action()
c.key = "1"
print(cmp(b,c))
a = SourceHandler()
print(a.append(b))
print(a.append(c))
print(a.source.list)
print(a.delete(b))
#for each in dir(a):
# print(getattr(a,each))
# test()
| apache-2.0 | -1,190,390,218,465,255,400 | 25.023256 | 76 | 0.547762 | false |
lyndsysimon/hgrid-git-example | app.py | 1 | 1874 | from flask import Flask, jsonify, render_template, request
import json
import os
import tempfile
app = Flask(__name__)
from git_subprocess import Repository
repo_path = '/tmp/test/'
# Set up a git repository for a storage backend
repo = Repository(repo_path or tempfile.mkdtemp())
repo.init()
# Homepage - just render the template
@app.route('/')
def index():
return render_template('index.html')
# DELETE verb
@app.route('/api/files/', methods=['DELETE', ])
def delete_files():
# since multiple items could be deleted at once, iterate the list.
for id in json.loads(request.form.get('ids', '[]')):
repo._rm_file(id)
repo.commit(
author='Internet User <anon@inter.net>',
message='Deleted file(s)',
)
return jsonify({'deleted': request.form.get('ids')})
# GET verb
@app.route('/api/files/', methods=['GET', ])
def get_files():
return jsonify({
'files': [
_file_dict(f)
for f in os.listdir(repo.path)
if os.path.isfile(os.path.join(repo.path, f))
]
})
# POST verb
@app.route('/api/files/', methods=['POST', ])
def add_file():
f = request.files.get('file')
# write the file out to its new location
new_path = os.path.join(repo.path, f.filename)
with open(new_path, 'w') as outfile:
outfile.write(f.read())
# add it to git and commit
repo.add_file(
file_path=f.filename,
commit_author='Internet User <anon@inter.net>',
commit_message='Commited file {}'.format(f.filename)
)
    return json.dumps([_file_dict(f.filename), ])
def _file_dict(f):
return {
'uid': f,
'name': f,
'size': os.path.getsize(os.path.join(repo.path, f)),
'type': 'file',
'parent_uid': 'null'
}
if __name__ == '__main__':
app.run(debug=True, port=5000)
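# --- Illustrative requests against the API above (host/port assumed) ---
#   curl http://localhost:5000/api/files/                          # list files
#   curl -F "file=@README.md" http://localhost:5000/api/files/     # upload + commit
#   curl -X DELETE -d 'ids=["README.md"]' http://localhost:5000/api/files/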
| bsd-2-clause | 4,562,782,882,468,337,000 | 23.337662 | 70 | 0.593917 | false |
lliss/tr-55 | tr55/model.py | 1 | 14151 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 Model Implementation
A mapping between variable/parameter names found in the TR-55 document
and variables used in this program are as follows:
* `precip` is referred to as P in the report
* `runoff` is Q
* `evaptrans` maps to ET, the evapotranspiration
* `inf` is the amount of water that infiltrates into the soil (in inches)
* `init_abs` is Ia, the initial abstraction, another form of infiltration
"""
import copy
from tr55.tablelookup import lookup_cn, lookup_bmp_infiltration, \
lookup_ki, is_bmp, is_built_type, make_precolumbian, get_pollutants
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.operations import dict_plus
def runoff_pitt(precip, land_use):
"""
The Pitt Small Storm Hydrology method. The output is a runoff
value in inches.
"""
c1 = +3.638858398e-2
c2 = -1.243464039e-1
c3 = +1.295682223e-1
c4 = +9.375868043e-1
c5 = -2.235170859e-2
c6 = +0.170228067e+0
c7 = -3.971810782e-1
c8 = +3.887275538e-1
c9 = -2.289321859e-2
p4 = pow(precip, 4)
p3 = pow(precip, 3)
p2 = pow(precip, 2)
impervious = (c1 * p3) + (c2 * p2) + (c3 * precip) + c4
urb_grass = (c5 * p4) + (c6 * p3) + (c7 * p2) + (c8 * precip) + c9
runoff_vals = {
'open_water': impervious,
'developed_low': 0.20 * impervious + 0.80 * urb_grass,
'cluster_housing': 0.20 * impervious + 0.80 * urb_grass,
'developed_med': 0.65 * impervious + 0.35 * urb_grass,
'developed_high': impervious,
'developed_open': urb_grass
}
if land_use not in runoff_vals:
raise Exception('Land use %s not a built-type.' % land_use)
else:
return min(runoff_vals[land_use], precip)
def nrcs_cutoff(precip, curve_number):
"""
A function to find the cutoff between precipitation/curve number
pairs that have zero runoff by definition, and those that do not.
"""
if precip <= -1 * (2 * (curve_number - 100.0) / curve_number):
return True
else:
return False
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
"""
The runoff equation from the TR-55 document. The output is a
runoff value in inches.
"""
if land_use == 'cluster_housing':
land_use = 'developed_low'
curve_number = lookup_cn(soil_type, land_use)
if nrcs_cutoff(precip, curve_number):
return 0.0
potential_retention = (1000.0 / curve_number) - 10
initial_abs = 0.2 * potential_retention
precip_minus_initial_abs = precip - initial_abs
numerator = pow(precip_minus_initial_abs, 2)
denominator = (precip_minus_initial_abs + potential_retention)
runoff = numerator / denominator
return min(runoff, precip - evaptrans)
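# Worked example (illustrative; a curve number of 80 is an assumption, not a
# value from the lookup tables):
#   potential_retention S = 1000/80 - 10 = 2.5
#   initial_abs Ia        = 0.2 * 2.5    = 0.5
#   for precip P = 3.0:   Q = (3.0 - 0.5)**2 / (3.0 - 0.5 + 2.5) = 1.25 inches,
#   which is then capped at P - ET.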
def simulate_cell_day(precip, evaptrans, cell, cell_count):
"""
Simulate a bunch of cells of the same type during a one-day event.
`precip` is the amount of precipitation in inches.
`evaptrans` is evapotranspiration.
`cell` is a string which contains a soil type and land use
separated by a colon.
`cell_count` is the number of cells to simulate.
The return value is a dictionary of runoff, evapotranspiration, and
infiltration as volumes of water.
"""
def clamp(runoff, et, inf, precip):
"""
        This function ensures that runoff + et + inf <= precip.
NOTE: infiltration is normally independent of the
precipitation level, but this function introduces a slight
dependency (that is, at very low levels of precipitation, this
function can cause infiltration to be smaller than it
        ordinarily would be).
"""
total = runoff + et + inf
if (total > precip):
scale = precip / total
runoff *= scale
et *= scale
inf *= scale
return (runoff, et, inf)
precip = max(0.0, precip)
soil_type, land_use, bmp = cell.lower().split(':')
# If there is no precipitation, then there is no runoff or
# infiltration. There is evapotranspiration, however (it is
# understood that over a period of time, this can lead to the sum
# of the three values exceeding the total precipitation).
if precip == 0.0:
return {
'runoff-vol': 0.0,
# 'et-vol': cell_count * evaptrans,
'et-vol': 0.0,
'inf-vol': 0.0,
}
# Deal with the Best Management Practices (BMPs). For most BMPs,
# the infiltration is read from the table and the runoff is what
# is left over after infiltration and evapotranspiration. Rain
# gardens are treated differently.
if bmp and is_bmp(bmp) and bmp != 'rain_garden':
inf = lookup_bmp_infiltration(soil_type, bmp) # infiltration
runoff = max(0.0, precip - (evaptrans + inf)) # runoff
(runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf
}
elif bmp and bmp == 'rain_garden':
# Here, return a mixture of 20% ideal rain garden and 80%
# high-intensity residential.
inf = lookup_bmp_infiltration(soil_type, bmp)
runoff = max(0.0, precip - (evaptrans + inf))
hi_res_cell = soil_type + ':developed_med:'
hi_res = simulate_cell_day(precip, evaptrans, hi_res_cell, 1)
hir_run = hi_res['runoff-vol']
hir_et = hi_res['et-vol']
hir_inf = hi_res['inf-vol']
final_runoff = (0.2 * runoff + 0.8 * hir_run)
final_et = (0.2 * evaptrans + 0.8 * hir_et)
final_inf = (0.2 * inf + 0.8 * hir_inf)
final = clamp(final_runoff, final_et, final_inf, precip)
(final_runoff, final_et, final_inf) = final
return {
'runoff-vol': cell_count * final_runoff,
'et-vol': cell_count * final_et,
'inf-vol': cell_count * final_inf
}
# At this point, if the `bmp` string has non-zero length, it is
# equal to either 'no_till' or 'cluster_housing'.
if bmp and bmp != 'no_till' and bmp != 'cluster_housing':
raise KeyError('Unexpected BMP: %s' % bmp)
land_use = bmp or land_use
# When the land use is a built-type and the level of precipitation
# is two inches or less, use the Pitt Small Storm Hydrology Model.
# When the land use is a built-type but the level of precipitation
# is higher, the runoff is the larger of that predicted by the
# Pitt model and NRCS model. Otherwise, return the NRCS amount.
if is_built_type(land_use) and precip <= 2.0:
runoff = runoff_pitt(precip, land_use)
elif is_built_type(land_use):
pitt_runoff = runoff_pitt(2.0, land_use)
nrcs_runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
runoff = max(pitt_runoff, nrcs_runoff)
else:
runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
inf = max(0.0, precip - (evaptrans + runoff))
(runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf,
}
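# Illustrative call (the soil group, land cover and ET value are assumptions and
# must exist in the package's lookup tables):
#   simulate_cell_day(precip=1.0, evaptrans=0.1, cell='c:developed_med:', cell_count=10)
# returns a dict of 'runoff-vol', 'et-vol' and 'inf-vol' volumes (cell-inches).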
def create_unmodified_census(census):
"""
This creates a cell census, ignoring any modifications. The
output is suitable for use with `simulate_water_quality`.
"""
unmod = copy.deepcopy(census)
unmod.pop('modifications', None)
return unmod
def create_modified_census(census):
"""
This creates a cell census, with modifications, that is suitable
for use with `simulate_water_quality`.
For every type of cell that undergoes modification, the
modifications are indicated with a sub-distribution under that
cell type.
"""
mod = copy.deepcopy(census)
mod.pop('modifications', None)
for (cell, subcensus) in mod['distribution'].items():
n = subcensus['cell_count']
changes = {
'distribution': {
cell: {
'distribution': {
cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
for modification in (census.get('modifications') or []):
for (orig_cell, subcensus) in modification['distribution'].items():
n = subcensus['cell_count']
soil1, land1 = orig_cell.split(':')
soil2, land2, bmp = modification['change'].split(':')
changed_cell = '%s:%s:%s' % (soil2 or soil1, land2 or land1, bmp)
changes = {
'distribution': {
orig_cell: {
'distribution': {
orig_cell: {'cell_count': -n},
changed_cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
return mod
def simulate_water_quality(tree, cell_res, fn,
current_cell=None, precolumbian=False):
"""
Perform a water quality simulation by doing simulations on each of
the cell types (leaves), then adding them together by summing the
values of a node's subtrees and storing them at that node.
`tree` is the (sub)tree of cell distributions that is currently
under consideration.
`cell_res` is the size of each cell (used for turning inches of
water into volumes of water).
`fn` is a function that takes a cell type and a number of cells
and returns a dictionary containing runoff, et, and inf as
volumes.
`current_cell` is the cell type for the present node.
"""
# Internal node.
if 'cell_count' in tree and 'distribution' in tree:
n = tree['cell_count']
# simulate subtrees
if n != 0:
tally = {}
for cell, subtree in tree['distribution'].items():
simulate_water_quality(subtree, cell_res, fn,
cell, precolumbian)
subtree_ex_dist = subtree.copy()
subtree_ex_dist.pop('distribution', None)
tally = dict_plus(tally, subtree_ex_dist)
tree.update(tally) # update this node
# effectively a leaf
elif n == 0:
for pol in get_pollutants():
tree[pol] = 0.0
# Leaf node.
elif 'cell_count' in tree and 'distribution' not in tree:
# the number of cells covered by this leaf
n = tree['cell_count']
# canonicalize the current_cell string
split = current_cell.split(':')
if (len(split) == 2):
split.append('')
if precolumbian:
split[1] = make_precolumbian(split[1])
current_cell = '%s:%s:%s' % tuple(split)
# run the runoff model on this leaf
result = fn(current_cell, n) # runoff, et, inf
tree.update(result)
# perform water quality calculation
if n != 0:
soil_type, land_use, bmp = split
runoff_per_cell = result['runoff-vol'] / n
liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
for pol in get_pollutants():
tree[pol] = get_pollutant_load(land_use, pol, liters)
def postpass(tree):
"""
Remove volume units and replace them with inches.
"""
if 'cell_count' in tree:
if tree['cell_count'] > 0:
n = tree['cell_count']
tree['runoff'] = tree['runoff-vol'] / n
tree['et'] = tree['et-vol'] / n
tree['inf'] = tree['inf-vol'] / n
else:
tree['runoff'] = 0
tree['et'] = 0
tree['inf'] = 0
tree.pop('runoff-vol', None)
tree.pop('et-vol', None)
tree.pop('inf-vol', None)
if 'distribution' in tree:
for subtree in tree['distribution'].values():
postpass(subtree)
def simulate_modifications(census, fn, cell_res, precolumbian=False):
"""
Simulate effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`fn` is as described in `simulate_water_quality`.
`cell_res` is as described in `simulate_water_quality`.
"""
mod = create_modified_census(census)
simulate_water_quality(mod, cell_res, fn, precolumbian=precolumbian)
postpass(mod)
unmod = create_unmodified_census(census)
simulate_water_quality(unmod, cell_res, fn, precolumbian=precolumbian)
postpass(unmod)
return {
'unmodified': unmod,
'modified': mod
}
def simulate_day(census, precip, cell_res=10, precolumbian=False):
"""
Simulate a day, including water quality effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`cell_res` is as described in `simulate_water_quality`.
`precolumbian` indicates that artificial types should be turned
into forest.
"""
et_max = 0.207
if 'modifications' in census:
verify_census(census)
def fn(cell, cell_count):
# Compute et for cell type
split = cell.split(':')
if (len(split) == 2):
(land_use, bmp) = split
else:
(_, land_use, bmp) = split
et = et_max * lookup_ki(bmp or land_use)
# Simulate the cell for one day
return simulate_cell_day(precip, et, cell, cell_count)
return simulate_modifications(census, fn, cell_res, precolumbian)
def verify_census(census):
"""
Assures that there is no soil type/land cover pair
in a modification census that isn't in the AoI census.
"""
for modification in census['modifications']:
for land_cover in modification['distribution']:
if land_cover not in census['distribution']:
raise ValueError("Invalid modification census")
| apache-2.0 | -1,927,152,914,711,812,900 | 33.098795 | 77 | 0.596636 | false |
luwei0917/awsemmd_script | small_script/computeRg.py | 1 | 2040 | from Bio.PDB.PDBParser import PDBParser
import argparse
parser = argparse.ArgumentParser(description="Compute Rg of pdb")
parser.add_argument("pdb", help="pdb file")
args = parser.parse_args()
def computeRg(pdb_file, chain="A"):
# compute Radius of gyration
# pdb_file = f"/Users/weilu/Research/server/feb_2019/iterative_optimization_new_temp_range/all_simulations/{p}/{p}/crystal_structure.pdb"
chain_name = chain
parser = PDBParser()
structure = parser.get_structure('X', pdb_file)
chain = list(structure[0][chain_name])
all_res = list(structure.get_residues())
# n = len(all_res)
# n = len(chain)
regular_res_list = [res for res in all_res if res.get_id()[0] == ' ']
n = len(regular_res_list)
print("all chains")
cutoff = 15
for residue in regular_res_list:
if residue.get_id()[0] == ' ' and abs(residue["CA"].get_vector()[-1]) < cutoff:
print(residue.get_id()[1])
rg = 0.0
for i, residue_i in enumerate(regular_res_list):
for j, residue_j in enumerate(regular_res_list[i+1:]):
try:
r = residue_i["CA"] - residue_j["CA"]
except:
print(residue_i, residue_j)
rg += r**2
return (rg/(n**2))**0.5
rg = computeRg(args.pdb)
print(rg)
def cylindrical_rg_bias_term(oa, k_rg=4.184, rg0=0, atomGroup=-1, forceGroup=27):
    # NOTE: this helper needs OpenMM and an openawsem-style `oa` object; the
    # import is done here so the Rg script above still runs without OpenMM.
    from simtk.openmm import CustomBondForce, CustomCVForce
    nres, ca = oa.nres, oa.ca
if atomGroup == -1:
group = list(range(nres))
else:
group = atomGroup # atomGroup = [0, 1, 10, 12] means include residue 1, 2, 11, 13.
n = len(group)
rg_square = CustomBondForce("1/normalization*(x^2+y^2)")
# rg = CustomBondForce("1")
rg_square.addGlobalParameter("normalization", n*n)
for i in group:
for j in group:
if j <= i:
continue
rg_square.addBond(ca[i], ca[j], [])
rg = CustomCVForce(f"{k_rg}*(rg_square^0.5-{rg0})^2")
rg.addCollectiveVariable("rg_square", rg_square)
rg.setForceGroup(forceGroup)
return rg
| mit | 9,124,268,330,187,088,000 | 35.428571 | 141 | 0.59951 | false |
thegrill/checkin-control | docs/source/conf.py | 1 | 6111 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# grill-checkin-control documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 25 22:20:49 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
# 'sphinx.ext.imgmath',
# 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
# 'sphinx.ext.githubpages',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx_autodoc_typehints']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'checkin-control'
copyright = '2017, Christian Lopez Barron'
author = 'Christian Lopez Barron'
# inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"',fontsize=14, ratio='compress')
inheritance_graph_attrs = dict(rankdir="TB", bgcolor='transparent')
# inheritance_node_attrs = dict(shape='Mrecord', fontsize=14, height=0.75, color='dodgerblue1', style='filled')
inheritance_node_attrs = dict(shape='Mrecord', color='"#2573a7"', style='filled', fillcolor='"#eaf4fa"',
size='"6.0, 8.0"')
inheritance_edge_attrs = dict(color='"#123a54"')
autodoc_member_order = 'groupwise'
autodoc_default_flags = ['members', 'show-inheritance']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'grill-checkin-controldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'grill-checkin-control.tex', 'grill-checkin-control Documentation',
'Christian Lopez Barron', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'grill-checkin-control', 'grill-checkin-control Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'grill-checkin-control', 'grill-checkin-control Documentation',
author, 'checkin-control', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None),
'fs': ('https://pyfilesystem2.readthedocs.io/en/latest/', None)}
| mit | -6,953,007,022,857,016,000 | 32.211957 | 111 | 0.663067 | false |
ProcessOut/processout-python | processout/token.py | 1 | 10402 | try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
from processout.networking.request import Request
from processout.networking.response import Response
# The content of this file was automatically generated
class Token(object):
def __init__(self, client, prefill = None):
self._client = client
self._id = None
self._customer = None
self._customer_id = None
self._gateway_configuration = None
self._gateway_configuration_id = None
self._card = None
self._card_id = None
self._type = None
self._metadata = None
self._is_subscription_only = None
self._is_default = None
self._created_at = None
if prefill != None:
self.fill_with_data(prefill)
@property
def id(self):
"""Get id"""
return self._id
@id.setter
def id(self, val):
"""Set id
Keyword argument:
val -- New id value"""
self._id = val
return self
@property
def customer(self):
"""Get customer"""
return self._customer
@customer.setter
def customer(self, val):
"""Set customer
Keyword argument:
val -- New customer value"""
if val is None:
self._customer = val
return self
if isinstance(val, dict):
obj = processout.Customer(self._client)
obj.fill_with_data(val)
self._customer = obj
else:
self._customer = val
return self
@property
def customer_id(self):
"""Get customer_id"""
return self._customer_id
@customer_id.setter
def customer_id(self, val):
"""Set customer_id
Keyword argument:
val -- New customer_id value"""
self._customer_id = val
return self
@property
def gateway_configuration(self):
"""Get gateway_configuration"""
return self._gateway_configuration
@gateway_configuration.setter
def gateway_configuration(self, val):
"""Set gateway_configuration
Keyword argument:
val -- New gateway_configuration value"""
if val is None:
self._gateway_configuration = val
return self
if isinstance(val, dict):
obj = processout.GatewayConfiguration(self._client)
obj.fill_with_data(val)
self._gateway_configuration = obj
else:
self._gateway_configuration = val
return self
@property
def gateway_configuration_id(self):
"""Get gateway_configuration_id"""
return self._gateway_configuration_id
@gateway_configuration_id.setter
def gateway_configuration_id(self, val):
"""Set gateway_configuration_id
Keyword argument:
val -- New gateway_configuration_id value"""
self._gateway_configuration_id = val
return self
@property
def card(self):
"""Get card"""
return self._card
@card.setter
def card(self, val):
"""Set card
Keyword argument:
val -- New card value"""
if val is None:
self._card = val
return self
if isinstance(val, dict):
obj = processout.Card(self._client)
obj.fill_with_data(val)
self._card = obj
else:
self._card = val
return self
@property
def card_id(self):
"""Get card_id"""
return self._card_id
@card_id.setter
def card_id(self, val):
"""Set card_id
Keyword argument:
val -- New card_id value"""
self._card_id = val
return self
@property
def type(self):
"""Get type"""
return self._type
@type.setter
def type(self, val):
"""Set type
Keyword argument:
val -- New type value"""
self._type = val
return self
@property
def metadata(self):
"""Get metadata"""
return self._metadata
@metadata.setter
def metadata(self, val):
"""Set metadata
Keyword argument:
val -- New metadata value"""
self._metadata = val
return self
@property
def is_subscription_only(self):
"""Get is_subscription_only"""
return self._is_subscription_only
@is_subscription_only.setter
def is_subscription_only(self, val):
"""Set is_subscription_only
Keyword argument:
val -- New is_subscription_only value"""
self._is_subscription_only = val
return self
@property
def is_default(self):
"""Get is_default"""
return self._is_default
@is_default.setter
def is_default(self, val):
"""Set is_default
Keyword argument:
val -- New is_default value"""
self._is_default = val
return self
@property
def created_at(self):
"""Get created_at"""
return self._created_at
@created_at.setter
def created_at(self, val):
"""Set created_at
Keyword argument:
val -- New created_at value"""
self._created_at = val
return self
def fill_with_data(self, data):
"""Fill the current object with the new values pulled from data
Keyword argument:
data -- The data from which to pull the new values"""
if "id" in data.keys():
self.id = data["id"]
if "customer" in data.keys():
self.customer = data["customer"]
if "customer_id" in data.keys():
self.customer_id = data["customer_id"]
if "gateway_configuration" in data.keys():
self.gateway_configuration = data["gateway_configuration"]
if "gateway_configuration_id" in data.keys():
self.gateway_configuration_id = data["gateway_configuration_id"]
if "card" in data.keys():
self.card = data["card"]
if "card_id" in data.keys():
self.card_id = data["card_id"]
if "type" in data.keys():
self.type = data["type"]
if "metadata" in data.keys():
self.metadata = data["metadata"]
if "is_subscription_only" in data.keys():
self.is_subscription_only = data["is_subscription_only"]
if "is_default" in data.keys():
self.is_default = data["is_default"]
if "created_at" in data.keys():
self.created_at = data["created_at"]
return self
def verify(self, options = {}):
"""Verify a customer token's card is valid.
Keyword argument:
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/customers/" + quote_plus(self.customer_id) + "/tokens/" + quote_plus(self.id) + "/verify"
data = {
}
response = Response(request.post(path, data, options))
return_values = []
return_values.append(response.success)
return return_values[0]
def fetch_customer_tokens(self, customer_id, options = {}):
"""Get the customer's tokens.
Keyword argument:
customer_id -- ID of the customer
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/customers/" + quote_plus(customer_id) + "/tokens"
data = {
}
response = Response(request.get(path, data, options))
return_values = []
a = []
body = response.body
for v in body['tokens']:
tmp = processout.Token(self._client)
tmp.fill_with_data(v)
a.append(tmp)
return_values.append(a)
return return_values[0]
def find(self, customer_id, token_id, options = {}):
"""Find a customer's token by its ID.
Keyword argument:
customer_id -- ID of the customer
token_id -- ID of the token
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/customers/" + quote_plus(customer_id) + "/tokens/" + quote_plus(token_id) + ""
data = {
}
response = Response(request.get(path, data, options))
return_values = []
body = response.body
body = body["token"]
obj = processout.Token(self._client)
return_values.append(obj.fill_with_data(body))
return return_values[0]
def create(self, options = {}):
"""Create a new token for the given customer ID.
Keyword argument:
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/customers/" + quote_plus(self.customer_id) + "/tokens"
data = {
'metadata': self.metadata,
'source': options.get("source"),
'settings': options.get("settings"),
'target': options.get("target"),
'verify': options.get("verify"),
'verify_metadata': options.get("verify_metadata"),
'set_default': options.get("set_default")
}
response = Response(request.post(path, data, options))
return_values = []
body = response.body
body = body["token"]
return_values.append(self.fill_with_data(body))
return return_values[0]
def delete(self, options = {}):
"""Delete a customer token
Keyword argument:
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/customers/" + quote_plus(self.customer_id) + "/tokens/" + quote_plus(self.id) + ""
data = {
}
response = Response(request.delete(path, data, options))
return_values = []
return_values.append(response.success)
return return_values[0]
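# --- Illustrative usage sketch (not part of the generated bindings) ---
# ``client`` is assumed to be a configured ProcessOut client instance; the
# customer and source identifiers below are placeholders.
#
#   token = Token(client)
#   token.customer_id = "cust_xxx"
#   token = token.create({"source": "card_xxx"})
#   all_tokens = Token(client).fetch_customer_tokens("cust_xxx")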
| mit | -1,514,942,630,543,906,300 | 26.44591 | 109 | 0.54278 | false |
Airbitz/airbitz-ofx | qbo.py | 1 | 7851 | #####################################################################
# #
# File: qbo.py #
# Developer: Justin Leto #
# #
# qbo class provides an interface from main csv iterator method #
# to handle qbo formatting, validations, and writing to file. #
# #
# Usage: python csvtoqbo.py <options> <csvfiles> #
# #
#####################################################################
import sys, traceback
import os
from datetime import datetime
import logging
import qboconst
class qbo:
	# Holds a list of valid transactions via the addTransaction() method.
	# Note: defined at class level, so the list is shared across qbo instances.
	__transactions = list()
# The full QBO document build from constants and transactions
__document = None
# Flag indicating whether the QBO document is valid
__isValid = None
# constructor
def __init__(self):
# Reads in constant values from file, set to private (const) variables
self.__HEADER = qboconst.HEADER
self.__FOOTER = qboconst.FOOTER
self.__DATE_START = qboconst.DATE_START
self.__DATE_END = qboconst.DATE_END
self.__BANKTRANLIST_START = qboconst.BANKTRANLIST_START
self.__BANKTRANLIST_END = qboconst.BANKTRANLIST_END
self.__TRANSACTION_START = qboconst.TRANSACTION_START
self.__TRANSACTION_END = qboconst.TRANSACTION_END
# Set document to valid
self.__isValid = True
# PUBLIC GET METHODS for constant values - used in unit testing.
#
#
def getHEADER(self):
return self.__HEADER
def getFOOTER(self):
return self.__FOOTER
def getDATE_START(self):
return self.__DATE_START
def getDATE_END(self):
return self.__DATE_END
def getBANKTRANLIST_START(self):
return self.__BANKTRANLIST_START
def getBANKTRANLIST_END(self):
return self.__BANKTRANLIST_END
def getTRANSACTION_START(self):
return self.__TRANSACTION_START
def getTRANSACTION_END(self):
return self.__TRANSACTION_END
	# method to validate parameters used to submit transactions
def validateTransaction(self, status, date_posted, txn_type, to_from_flag, txn_amount, txn_exrate, name):
# if str.lower(status) != 'completed':
# #log status failure
# logging.info("Transaction status [" + status + "] invalid.")
# raise Exception("Transaction status [" + status + "] invalid.")
#
#if type(datetime.strptime(str(date_posted), '%m/%d/%Y')) is not datetime:
# logging.info("Transaction posted date [" + date_posted + "] invalid.")
# raise Exception("Transaction posted date [" + date_posted + "] invalid.")
# if str.lower(txn_type) not in ('payment','refund','withdrawal', 'withdraw funds', 'send', 'receive'):
# logging.info("Transaction type [" + str(txn_type) + "] not 'Payment', 'Refund', 'Withdraw Funds', or 'Withdrawal'.")
# raise Exception("Transaction type [" + str(txn_type) + "] not 'Payment', 'Refund', 'Withdraw Funds', or 'Withdrawal'.")
#
# if str.lower(to_from_flag) not in ('to', 'from'):
# logging.info("Transaction 'To/From' field [" + to_from_flag + "] invalid.")
# raise Exception("Transaction 'To/From' field [" + to_from_flag + "] invalid.")
#
# #logical test of txn_type and to_from_flag
# if ((str.lower(txn_type) == 'refund' and str.lower(to_from_flag) != 'to') or (str.lower(txn_type) == 'payment' and str.lower(to_from_flag) != 'from')):
# logging.info("Transaction type inconsistent with 'To/From' field.")
# raise Exception("Transaction type inconsistent with 'To/From' field.")
#
if len(name) == 0 or not name:
logging.info("Transaction name empty or null.")
raise Exception("Transaction name empty or null.")
return True
# Add transaction takes in param values uses the required formatting QBO transactions
# and pushes to list
def addTransaction(self, denom, date_posted, txn_memo, txn_id, txn_amount, txn_curamt, txn_category, name):
# try:
# # Validating param values prior to committing transaction
# self.validateTransaction(status, date_posted, txn_type, txn_id, txn_amount, name)
# except:
# raise Exception
# Construct QBO formatted transaction
transaction = ""
day = ""
month = ""
date_array = date_posted.split('-')
day = date_array[2]
month = date_array[1]
year = date_array[0]
if len(day) == 1:
day = "0"+day
if len(month) ==1:
month = "0"+month
rec_date = datetime.strptime(year+"/"+month+"/"+day, '%Y/%m/%d')
rec_date = rec_date.strftime('%Y%m%d%H%M%S') + '.000'
dtposted = ' <DTPOSTED>' + rec_date
if float(txn_amount) > 0:
trtype = ' <TRNTYPE>CREDIT'
else:
trtype = ' <TRNTYPE>DEBIT'
#
# if str.lower(txn_type) == 'receive':
# trtype = '<TRNTYPE>CREDIT'
# elif str.lower(txn_type) == 'send':
# trtype = '<TRNTYPE>DEBIT'
# if str.lower(txn_type) in ('refund', 'withdrawal', 'withdraw funds'):
# tramt = '<TRNAMT>-' + str(txn_amount).replace('$','')
# else:
# tramt = '<TRNAMT>' + str(txn_amount).replace('$','')
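		# denom appears to scale the native amount into the unit written to the
		# OFX body (tramtbits); txn_curamt is the same transaction in USD, so
		# exrate below recovers the USD-per-unit exchange rate.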
tramtbits = float(txn_amount) * denom
tramt = ' <TRNAMT>' + str(tramtbits)
if name:
trname = ' <NAME>' + str(name) + "\n"
else:
trname = ''
exrate = float(txn_curamt) / (tramtbits)
curamt = "{0:0.2f}".format(abs(float(txn_curamt)))
fmtexrate = "{0:0.6f}".format(float(exrate))
rawmemo = 'Rate=' + fmtexrate + " USD=" + curamt + " category=\"" + str(txn_category) + "\" memo=\"" + str(txn_memo)
memo = ' <MEMO>' + rawmemo[:253] + "\"\n"
fitid = ' <FITID>' + str(txn_id)
exrate = ' <CURRATE>' + fmtexrate
transaction = ("" + self.__TRANSACTION_START + "\n"
"" + trtype + "\n"
"" + dtposted + "\n"
"" + tramt + "\n"
"" + fitid + "\n"
"" + trname +
"" + memo +
"" + " <CURRENCY>" + "\n"
"" + exrate + "\n"
"" + " <CURSYM>USD" + "\n"
"" + " </CURRENCY>" + "\n"
"" + self.__TRANSACTION_END + "\n")
# Commit transaction to the document by adding to private member list object
self.__transactions.append(transaction)
logging.info("Transaction [" + str(self.getCount()) + "] Accepted.")
return True
# get the current number of valid committed transactions
def getCount(self):
return len(self.__transactions)
# get the valid status of the document
def isValid(self):
# If number of valid transactions are 0 document is invalid
if self.getCount() == 0:
self.__isValid = False
return self.__isValid
# get the text of the document
def getDocument(self):
self.Build()
return self.__document
# Construct the document, add the transactions
# save str into private member variable __document
def Build(self):
if not self.isValid():
logging.info("Error: QBO document is not valid.")
raise Exception("Error: QBO document is not valid.")
self.__document = ("" + self.__HEADER + "\n"
"" + self.__BANKTRANLIST_START + "\n"
"" + self.__DATE_START + "\n"
"" + self.__DATE_END + "\n")
for txn in self.__transactions:
self.__document = self.__document + str(txn)
self.__document = self.__document + ("" + self.__BANKTRANLIST_END + "\n"
"" + self.__FOOTER + "")
# Write QBO document to file
def Write(self, filename):
try:
with open(filename, 'w') as f:
# getDocument method will build document
# test for validity and return string for write
f.write(self.getDocument())
return True
except:
#log io error return False
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
logging.info('qbo.Write() method: '.join('!! ' + line for line in lines))
return False
| mit | 1,675,282,766,867,628,800 | 31.126582 | 155 | 0.603235 | false |
rackerlabs/cache-busters | cache_buster/test/test_driver.py | 1 | 5320 | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pretend
from twisted.internet.defer import Deferred, succeed, fail
from twisted.python.failure import Failure
from twisted.trial import unittest
from cache_buster.driver import Driver, count_cache_results
from cache_buster.keys import FormattingKeyMaker
from cache_buster.test.doubles import DummyLogger
class DriverTests(unittest.TestCase):
def test_construct(self):
Driver(FormattingKeyMaker({}), None, None)
def test_invalidate_row_calls_cache_delete(self):
cache = pretend.stub(
delete=pretend.call_recorder(lambda key: succeed(None))
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar", "baz"]
}), cache, DummyLogger())
d.invalidate_row("foo_table", {})
self.assertEqual(cache.delete.calls, [
pretend.call("bar"), pretend.call("baz")
])
def test_invalidate_row_returns_deferred(self):
d = Driver(FormattingKeyMaker({}), None, DummyLogger())
res = self.successResultOf(d.invalidate_row("foo_table", {}))
self.assertIs(res, None)
def test_invalidate_row_waits_for_cache_delete(self):
d1 = Deferred()
cache = pretend.stub(
delete=lambda key: d1,
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar"]
}), cache, DummyLogger())
invalidate_d = d.invalidate_row("foo_table", {})
self.assertNoResult(invalidate_d)
d1.callback(None)
res = self.successResultOf(invalidate_d)
self.assertIs(res, None)
def test_invalidate_row_succeeds_on_cache_delete_failure(self):
cache = pretend.stub(
delete=lambda key: fail(Exception()),
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar"]
}), cache, DummyLogger())
invalidate_d = d.invalidate_row("foo_table", {})
res = self.successResultOf(invalidate_d)
self.assertIs(res, None)
def test_invalidate_row_logs_on_cache_delete_failure(self):
f = Failure(Exception())
cache = pretend.stub(
delete=lambda key: fail(f),
)
logger = pretend.stub(
msg=lambda s, **kwargs: None,
err=pretend.call_recorder(lambda failure, table, key: None)
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar"]
}), cache, logger)
d.invalidate_row("foo_table", {})
self.assertEqual(logger.err.calls, [
pretend.call(f, table="foo_table", key="bar")
])
def test_invalidate_row_logs_counts(self):
cache = pretend.stub(
delete=lambda key: succeed(True)
)
logger = pretend.stub(
err=None,
msg=pretend.call_recorder(lambda *args, **kwargs: None),
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar", "baz"]
}), cache, logger)
d.invalidate_row("foo_table", {})
self.assertEqual(logger.msg.calls, [
pretend.call("cache_buster.driver.invalidated_rows",
deletes=2, nonexistant=0, failures=0,
)
])
def test_invalidate_row_logs_nonexistant_counts(self):
cache = pretend.stub(
delete=lambda key: succeed(False)
)
logger = pretend.stub(
err=None,
msg=pretend.call_recorder(lambda *args, **kwargs: None)
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar"]
}), cache, logger)
d.invalidate_row("foo_table", {})
self.assertEqual(logger.msg.calls, [
pretend.call("cache_buster.driver.invalidated_rows",
deletes=0, nonexistant=1, failures=0,
)
])
def test_invalidate_row_logs_failure_counts(self):
cache = pretend.stub(
delete=lambda key: fail(Exception())
)
logger = pretend.stub(
err=lambda failure, table, key: None,
msg=pretend.call_recorder(lambda *args, **kwargs: None)
)
d = Driver(FormattingKeyMaker({
"foo_table": ["bar"]
}), cache, logger)
d.invalidate_row("foo_table", {})
self.assertEqual(logger.msg.calls, [
pretend.call("cache_buster.driver.invalidated_rows",
deletes=0, nonexistant=0, failures=1,
)
])
class CountCacheResultsTests(unittest.TestCase):
def test_many_results(self):
deletes, nonexistant, failures = count_cache_results([
True,
False,
None,
False,
True
])
self.assertEqual(deletes, 2)
self.assertEqual(nonexistant, 2)
self.assertEqual(failures, 1)
| apache-2.0 | 5,146,065,233,699,703,000 | 32.88535 | 72 | 0.594737 | false |
berkerpeksag/pythondotorg | pydotorg/settings/base.py | 1 | 5943 | import os
import dj_database_url
### Basic config
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
DEBUG = TEMPLATE_DEBUG = True
SITE_ID = 1
SECRET_KEY = 'its-a-secret-to-everybody'
# Until Sentry works on Py3, do errors the old-fashioned way.
ADMINS = []
# General project information
# These are available in the template as SITE_INFO.<title>
SITE_VARIABLES = {
'site_name': 'Python.org',
'site_descript': 'The official home of the Python Programming Language',
}
### Databases
DATABASES = {
'default': dj_database_url.config(default='postgres:///python.org')
}
### Locale settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
### Files (media and static)
MEDIA_ROOT = os.path.join(BASE, 'media')
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE, 'static-root')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE, 'static'),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
### Authentication
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_QUERY_EMAIL = True
### Templates
TEMPLATE_DIRS = [
os.path.join(BASE, 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
"django.contrib.messages.context_processors.messages",
"pydotorg.context_processors.site_info",
"pydotorg.context_processors.url_name",
]
### URLs, WSGI, middleware, etc.
ROOT_URLCONF = 'pydotorg.urls'
MIDDLEWARE_CLASSES = (
'pydotorg.middleware.AdminNoCaching',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pages.middleware.PageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTH_USER_MODEL = 'users.User'
WSGI_APPLICATION = 'pydotorg.wsgi.application'
### Apps
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
'django_comments_xtd',
'jsonfield',
'pipeline',
'sitetree',
'timedelta',
'imagekit',
'haystack',
'honeypot',
'users',
'boxes',
'cms',
'companies',
'feedbacks',
'community',
'jobs',
'pages',
'sponsors',
'successstories',
'events',
'minutes',
'peps',
'blogs',
'downloads',
'codesamples',
'allauth',
'allauth.account',
'allauth.socialaccount',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.twitter',
# Tastypie needs the `users` app to be already loaded.
'tastypie',
]
# Fixtures
FIXTURE_DIRS = (
os.path.join(BASE, 'fixtures'),
)
### Testing
SKIP_NETWORK_TESTS = True
### Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
### Development
DEV_FIXTURE_URL = 'https://www.python.org/m/fixtures/dev-fixtures.json.gz'
### Comments
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 0
COMMENTS_XTD_FORM_CLASS = "jobs.forms.JobCommentForm"
### Honeypot
HONEYPOT_FIELD_NAME = 'email_body_text'
HONEYPOT_VALUE = 'write your message'
### Blog Feed URL
PYTHON_BLOG_FEED_URL = "http://feeds.feedburner.com/PythonInsider"
PYTHON_BLOG_URL = "http://blog.python.org"
### Registration mailing lists
MAILING_LIST_PSF_MEMBERS = "psf-members-announce-request@python.org"
### PEP Repo Location
PEP_REPO_PATH = ''
### Fastly ###
FASTLY_API_KEY = False # Set to Fastly API key in production to allow pages to
# be purged on save
# Jobs
JOB_THRESHOLD_DAYS = 90
JOB_FROM_EMAIL = 'jobs@python.org'
### Pipeline
from .pipeline import (
PIPELINE_CSS, PIPELINE_JS,
PIPELINE_COMPILERS,
PIPELINE_SASS_BINARY, PIPELINE_SASS_ARGUMENTS,
PIPELINE_CSS_COMPRESSOR, PIPELINE_JS_COMPRESSOR,
)
| apache-2.0 | -7,469,629,725,360,730,000 | 23.557851 | 79 | 0.676931 | false |
stevegt/UltimakerUtils | leveling-rings-UM1.py | 1 | 2681 | #!/usr/bin/python
# Derived from the UM2 version by an anonymous contributor...
#
# http://umforum.ultimaker.com/index.php?/topic/5951-um2-calibration-utility-leveling-ringsgcode/?p=54694
#
# ...who wisely says: "I accept NO liability for any damage done by
# using either version or any derivatives. USE AT YOUR OWN RISK."
filament_diameter = 2.89
build_area_width = 205.0
build_area_depth = 205.0
rings = 10
wide = 0.4
thick = 0.2925 / 2
temperature = 230
bed_temperature = 60
base_dia = 180
pi=3.1415927
center_x = build_area_width/2.0
center_y = build_area_depth/2.0
filament_area = (filament_diameter / 2) ** 2 * pi
head = '''
M107 ;start with the fan off
G21 ;metric values
G90 ;absolute positioning
M82 ;set extruder to absolute mode
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F9000 ;move the platform down 15mm
M140 S{bed_temperature:.2f} ;set bed temp (no wait)
M109 T0 S{temperature:.2f} ;set extruder temp (wait)
M190 S{bed_temperature:.2f} ;set bed temp (wait)
G92 E0 ;zero the extruded length
G1 F200 E3 ;extrude 3mm of feed stock
G92 E0 ;zero the extruded length again
G1 F9000 ;set speed to 9000
;Put printing message on LCD screen
M117 Printing...
;Layer count: 1
;LAYER:0
'''
loop = '''
G0 F9000 X{x:.2f} Y{y:.2f} Z{z:.2f}
G2 F1000 X{x:.2f} Y{y:.2f} I{r:.2f} E{total_mm3:.2f}'''
tail = '''
;End GCode
M104 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F9000 ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning'''
total_mm3 = 0
body = ''
cross_section = thick * wide
z = thick
for i in range(rings):
dia = base_dia - ((wide * 2) * i)
circumference = pi * dia
    r = dia/2.0
x = center_x - r
y = center_y
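    # Ring volume = circumference x extrusion cross-section; dividing by the
    # filament cross-section gives the E-axis advance in mm of filament.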
mm3 = (circumference * cross_section) / filament_area
total_mm3 += mm3
body += loop.format(**vars())
print head.format(**vars())
print body
print tail.format(**vars())
| gpl-2.0 | 5,621,385,078,935,052,000 | 30.174419 | 118 | 0.564715 | false |
udapi/udapi-python | udapi/block/ud/complywithtext.py | 1 | 11648 | r"""Block ComplyWithText for adapting the nodes to comply with the text.
Implementation design details:
Usually, most of the inconsistencies between tree tokens and the raw text are simple to solve.
However, there may be also rare cases when it is not clear how to align the tokens
(nodes in the tree) with the raw text (stored in ``root.text``).
This block tries to solve the general case using several heuristics.
It starts with running a LCS-like algorithm (LCS = longest common subsequence)
``difflib.SequenceMatcher`` on the raw text and concatenation of tokens' forms,
i.e. on sequences of characters (as opposed to running LCS on sequences of tokens).
To prevent mis-alignment problems, we keep the spaces present in the raw text
and we insert spaces into the concatenated forms (``tree_chars``) according to ``SpaceAfter=No``.
An example of a mis-alignment problem:
text "énfase na necesidade" with 4 nodes "énfase en a necesidade"
should be solved by adding multiword token "na" over the nodes "en" and "a".
However, running LCS (or difflib) over the character sequences
"énfaseenanecesidade"
"énfasenanecesidade"
may result in énfase -> énfas.
Author: Martin Popel
"""
import difflib
import logging
import re
from udapi.core.block import Block
from udapi.core.mwt import MWT
class ComplyWithText(Block):
"""Adapt the nodes to comply with the text."""
def __init__(self, fix_text=True, prefer_mwt=True, allow_goeswith=True, max_mwt_length=4,
**kwargs):
"""Args:
fix_text: After all heuristics are applied, the token forms may still not match the text.
Should we edit the text to match the token forms (as a last resort)? Default=True.
prefer_mwt - What to do if multiple subsequent nodes correspond to a text written
without spaces and non-word characters (punctuation)?
E.g. if "3pm doesn't" is annotated with four nodes "3 pm does n't".
We can use either SpaceAfter=No, or create a multi-word token (MWT).
Note that if there is space or punctuation, SpaceAfter=No will be used always
(e.g. "3 p.m." annotated with three nodes "3 p. m.").
If the character sequence does not match exactly, MWT will be used always
(e.g. "3pm doesn't" annotated with four nodes "3 p.m. does not").
Thus this parameter influences only the "unclear" cases.
Default=True (i.e. prefer multi-word tokens over SpaceAfter=No).
allow_goeswith - If a node corresponds to multiple space-separated strings in text,
which are not allowed as tokens with space, we can either leave this diff
unresolved or create new nodes and join them with the `goeswith` deprel.
Default=True (i.e. add the goeswith nodes if applicable).
max_mwt_length - Maximum length of newly created multi-word tokens (in syntactic words).
Default=4.
"""
super().__init__(**kwargs)
self.fix_text = fix_text
self.prefer_mwt = prefer_mwt
self.allow_goeswith = allow_goeswith
self.max_mwt_length = max_mwt_length
@staticmethod
def allow_space(form):
"""Is space allowed within this token form?"""
return re.fullmatch('[0-9 ]+([,.][0-9]+)?', form)
@staticmethod
def store_orig_form(node, new_form):
"""Store the original form of this node into MISC, unless the change is common&expected."""
_ = new_form
if node.form not in ("''", "``"):
node.misc['OrigForm'] = node.form
def process_tree(self, root):
text = root.text
if text is None:
raise ValueError('Tree %s has no text, cannot use ud.ComplyWithText' % root)
# Normalize the stored text (double space -> single space)
# and skip sentences which are already ok.
text = ' '.join(text.split())
if text == root.compute_text():
return
tree_chars, char_nodes = _nodes_to_chars(root.token_descendants)
# Align. difflib may not give LCS, but usually it is good enough.
matcher = difflib.SequenceMatcher(None, tree_chars, text, autojunk=False)
diffs = list(matcher.get_opcodes())
_log_diffs(diffs, tree_chars, text, 'matcher')
diffs = self.unspace_diffs(diffs, tree_chars, text)
_log_diffs(diffs, tree_chars, text, 'unspace')
diffs = self.merge_diffs(diffs, char_nodes)
_log_diffs(diffs, tree_chars, text, 'merge')
# Solve diffs.
self.solve_diffs(diffs, tree_chars, char_nodes, text)
# Fill SpaceAfter=No.
tmp_text = text
for node in root.token_descendants:
if tmp_text.startswith(node.form):
tmp_text = tmp_text[len(node.form):]
if not tmp_text or tmp_text[0].isspace():
del node.misc['SpaceAfter']
tmp_text = tmp_text.lstrip()
else:
node.misc['SpaceAfter'] = 'No'
else:
logging.warning('Node %s does not match text "%s"', node, tmp_text[:20])
return
# Edit root.text if needed.
if self.fix_text:
computed_text = root.compute_text()
if text != computed_text:
root.add_comment('ToDoOrigText = ' + root.text)
root.text = computed_text
def unspace_diffs(self, orig_diffs, tree_chars, text):
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert':
if tree_chars[tree_lo] == ' ':
tree_lo += 1
if tree_chars[tree_hi - 1] == ' ':
tree_hi -= 1
old = tree_chars[tree_lo:tree_hi]
new = text[text_lo:text_hi]
if old == '' and new == '':
continue
elif old == new:
edit = 'equal'
elif old == '':
edit = 'insert'
diffs.append((edit, tree_lo, tree_hi, text_lo, text_hi))
return diffs
def merge_diffs(self, orig_diffs, char_nodes):
"""Make sure each diff starts on original token boundary.
If not, merge the diff with the previous diff.
E.g. (equal, "5", "5"), (replace, "-6", "–7")
is changed into (replace, "5-6", "5–7")
"""
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert' and char_nodes[tree_lo] is not None:
diffs.append(diff)
elif edit == 'equal':
while tree_lo < tree_hi and char_nodes[tree_lo] is None:
tree_lo += 1
text_lo += 1
diffs[-1] = ('replace', diffs[-1][1], tree_lo, diffs[-1][3], text_lo)
if tree_lo < tree_hi:
diffs.append(('equal', tree_lo, tree_hi, text_lo, text_hi))
else:
if not diffs:
diffs = [diff]
elif diffs[-1][0] != 'equal':
diffs[-1] = ('replace', diffs[-1][1], tree_hi, diffs[-1][3], text_hi)
else:
p_tree_hi = diffs[-1][2] - 1
p_text_hi = diffs[-1][4] - 1
while char_nodes[p_tree_hi] is None:
p_tree_hi -= 1
p_text_hi -= 1
assert p_tree_hi >= diffs[-1][1]
assert p_text_hi >= diffs[-1][3]
diffs[-1] = ('equal', diffs[-1][1], p_tree_hi, diffs[-1][3], p_text_hi)
diffs.append(('replace', p_tree_hi, tree_hi, p_text_hi, text_hi))
return diffs
def solve_diffs(self, diffs, tree_chars, char_nodes, text):
for diff in diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
# Focus only on edits of type 'replace', log insertions and deletions as failures.
if edit == 'equal':
continue
if edit in ('insert', 'delete'):
logging.warning('Unable to solve token-vs-text mismatch\n%s',
_diff2str(diff, tree_chars, text))
continue
            # Revert the splitting and solve the diff.
nodes = [n for n in char_nodes[tree_lo:tree_hi] if n is not None]
form = text[text_lo:text_hi]
self.solve_diff(nodes, form.strip())
def solve_diff(self, nodes, form):
"""Fix a given (minimal) tokens-vs-text inconsistency."""
nodes_str = ' '.join([n.form for n in nodes]) # just for debugging
node = nodes[0]
# First, solve the cases when the text contains a space.
if ' ' in form:
if len(nodes) == 1 and node.form == form.replace(' ', ''):
if self.allow_space(form):
self.store_orig_form(node, form)
node.form = form
elif self.allow_goeswith:
forms = form.split()
node.form = forms[0]
for split_form in reversed(forms[1:]):
new = node.create_child(form=split_form, deprel='goeswith', upos=node.upos)
new.shift_after_node(node)
else:
logging.warning('Unable to solve 1:m diff:\n%s -> %s', nodes_str, form)
else:
logging.warning('Unable to solve n:m diff:\n%s -> %s', nodes_str, form)
# Second, solve the cases when multiple nodes match one form (without any spaces).
elif len(nodes) > 1:
            # If the match is exact, we can choose between MWT and SpaceAfter solutions.
if not self.prefer_mwt and ''.join([n.form for n in nodes]) == form:
pass # SpaceAfter=No will be added later on.
# If one of the nodes is already a MWT, we cannot have nested MWTs.
# TODO: enlarge the MWT instead of failing.
elif any(isinstance(n, MWT) for n in nodes):
logging.warning('Unable to solve partial-MWT diff:\n%s -> %s', nodes_str, form)
# MWT with too many words are suspicious.
elif len(nodes) > self.max_mwt_length:
logging.warning('Not creating too long (%d>%d) MWT:\n%s -> %s',
len(nodes), self.max_mwt_length, nodes_str, form)
# Otherwise, create a new MWT.
else:
node.root.create_multiword_token(nodes, form)
# Third, solve the 1-1 cases.
else:
self.store_orig_form(node, form)
node.form = form
def _nodes_to_chars(nodes):
chars, char_nodes = [], []
for node in nodes:
form = node.form
if node.misc['SpaceAfter'] != 'No' and node != nodes[-1]:
form += ' '
chars.extend(form)
char_nodes.append(node)
char_nodes.extend([None] * (len(form) - 1))
return ''.join(chars), char_nodes
def _log_diffs(diffs, tree_chars, text, msg):
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.warning('=== After %s:', msg)
for diff in diffs:
logging.warning(_diff2str(diff, tree_chars, text))
def _diff2str(diff, tree, text):
old = '|' + ''.join(tree[diff[1]:diff[2]]) + '|'
new = '|' + ''.join(text[diff[3]:diff[4]]) + '|'
if diff[0] == 'equal':
return '{:7} {!s:>50}'.format(diff[0], old)
return '{:7} {!s:>50} --> {!s}'.format(diff[0], old, new)
| gpl-3.0 | 6,423,815,890,427,901,000 | 42.75188 | 99 | 0.559117 | false |
storiesofsolidarity/story-database | stories/admin.py | 1 | 1393 | from django.contrib import admin
from models import Location, Story
from people.models import Author
class LocationAdmin(admin.ModelAdmin):
list_display = ('zipcode', 'city_fmt', 'county_fmt', 'state_fmt', 'story_count')
list_filter = ('state',)
search_fields = ('zipcode', 'city', 'county')
admin.site.register(Location, LocationAdmin)
class EmployerFilter(admin.SimpleListFilter):
title = 'author employer'
parameter_name = 'employer'
def lookups(self, request, model_admin):
employer_set = set()
for a in Author.objects.all():
if a.employer:
employer_set.add(a.employer.split(' ', 1)[0])
return [(str(c), str(c)) for c in employer_set if c]
def queryset(self, request, queryset):
if self.value() or self.value() == 'None':
return queryset.filter(author__employer__startswith=self.value())
else:
return queryset
class StoryAdmin(admin.ModelAdmin):
list_display = ('excerpt', 'author_display', 'employer', 'anonymous', 'created_at')
list_filter = (EmployerFilter, 'location__state', 'truncated')
date_hierarchy = 'created_at'
readonly_fields = ('truncated',)
raw_id_fields = ('author', 'location')
search_fields = ('location__city', 'author__user__first_name', 'author__user__last_name', 'content')
admin.site.register(Story, StoryAdmin)
| agpl-3.0 | -544,278,433,607,546,560 | 33.825 | 104 | 0.648959 | false |
peppelinux/inventario_verdebinario | museo/models.py | 1 | 4183 | from django.db import models
from photologue.models import ImageModel
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
class Produttore(ImageModel):
id_tabella = models.AutoField(primary_key=True)
nome = models.CharField(max_length=135, blank=True)
nome_abbreviato = models.CharField(max_length=135, blank=True)
#slug = models.SlugField(unique=True, help_text=('"slug": un identificatore automatico e univoco'))
descrizione = models.TextField(max_length=1024, blank=True)
data_nascita = models.DateField(null=True, blank=True)
data_chiusura = models.DateField(null=True, blank=True)
#immagine_logo = models.ImageField(upload_to="LoghiProduttori", blank=True)
url = models.CharField(max_length=256, blank=True)
def save(self, *args, **kwargs):
if self.nome_abbreviato == None or self.nome_abbreviato.split() == []:
self.nome_abbreviato = self.nome.upper()
super(self.__class__, self).save(*args, **kwargs) # Call the "real" save() method.
class Meta:
ordering = ['nome']
db_table = 'produttore'
verbose_name_plural = "Produttore"
# def get_absolute_url(self):
# return '%s' % (self.url)
def __str__(self):
return '%s' % (self.nome_abbreviato)
class SchedaTecnica(models.Model):
id_tabella = models.AutoField(primary_key=True)
modello = models.CharField(max_length=135, blank=True)
produttore = models.ForeignKey(Produttore, null=True, blank=True, on_delete=models.SET_NULL)
paese_di_origine = models.CharField(max_length=135, blank=True)
anno = models.CharField(max_length=135, blank=True)
tastiera = models.CharField(max_length=135, blank=True)
cpu = models.CharField(max_length=135, blank=True)
velocita = models.CharField(max_length=135, blank=True)
memoria_volatile = models.CharField(max_length=135, blank=True)
memoria_di_massa = models.CharField(max_length=135, blank=True)
modalita_grafica = models.CharField(max_length=135, blank=True)
audio = models.CharField(max_length=135, blank=True)
dispositivi_media = models.CharField(max_length=135, blank=True)
alimentazione = models.CharField(max_length=135, blank=True)
prezzo = models.CharField(max_length=135, blank=True)
descrizione = models.TextField(max_length=1024, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
class Meta:
db_table = 'scheda_tecnica'
verbose_name_plural = "Scheda Tecnica"
class FotoHardwareMuseo(ImageModel):
id_tabella = models.AutoField(primary_key=True)
#immagine = models.ImageField(upload_to="FotoHardwareMuseo/%d.%m.%Y", blank=True)
etichetta_verde = models.CharField(max_length=135, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
seriale = models.CharField(max_length=384, blank=True)
didascalia = models.TextField(max_length=328, blank=True)
scheda_tecnica = models.ForeignKey(SchedaTecnica, null=True, blank=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'foto_hardware_museo'
verbose_name_plural = "Foto Hardware Museo"
def __str__(self):
return '%s %s' % (self.seriale, self.scheda_tecnica)
def get_absolute_url(self):
#return '/media/foto/FotoHardwareMuseo/' + self.data_inserimento.strftime('%d.%m.%Y') + '/' + self.image.name
return '/media/%s' % self.image.name
def admin_thumbnail(self):
func = getattr(self, 'get_admin_thumbnail_url', None)
if func is None:
return _('An "admin_thumbnail" photo size has not been defined.')
else:
if hasattr(self, 'get_absolute_url'):
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), func())
else:
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.image.url, func())
admin_thumbnail.short_description = _('Thumbnail')
admin_thumbnail.allow_tags = True
| gpl-3.0 | 3,234,902,780,985,170,000 | 44.967033 | 117 | 0.671528 | false |
XtheOne/Inverter-Data-Logger | InverterLib.py | 1 | 3301 | import socket
import struct
import os
import binascii
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('cp437')
def getNetworkIp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
def createV4RequestFrame(logger_sn):
"""Create request frame for inverter logger.
    The request string is built from several parts. The first part is a
fixed 4 char string; the second part is the reversed hex notation of
the s/n twice; then again a fixed string of two chars; a checksum of
the double s/n with an offset; and finally a fixed ending char.
Args:
logger_sn (int): Serial number of the inverter
Returns:
str: Information request string for inverter
"""
#frame = (headCode) + (dataFieldLength) + (contrlCode) + (sn) + (sn) + (command) + (checksum) + (endCode)
frame_hdr = binascii.unhexlify('680241b1') #from SolarMan / new Omnik app
command = binascii.unhexlify('0100')
defchk = binascii.unhexlify('87')
endCode = binascii.unhexlify('16')
tar = bytearray.fromhex(hex(logger_sn)[8:10] + hex(logger_sn)[6:8] + hex(logger_sn)[4:6] + hex(logger_sn)[2:4])
frame = bytearray(frame_hdr + tar + tar + command + defchk + endCode)
checksum = 0
frame_bytes = bytearray(frame)
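    # Checksum: 8-bit sum of all bytes after the leading byte and before the
    # checksum position itself, i.e. frame_bytes[1:-2], stored second-to-last.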
for i in range(1, len(frame_bytes) - 2, 1):
checksum += frame_bytes[i] & 255
frame_bytes[len(frame_bytes) - 2] = int((checksum & 255))
return bytearray(frame_bytes)
def expand_path(path):
"""
Expand relative path to absolute path.
Args:
path: file path
Returns: absolute path to file
"""
if os.path.isabs(path):
return path
else:
return os.path.dirname(os.path.abspath(__file__)) + "/" + path
def getLoggers():
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((getNetworkIp(), 48899))
# Set a timeout so the socket does not block indefinitely when trying to receive data.
sock.settimeout(3)
# Set the time-to-live for messages to 1 so they do not go past the local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
SendData = "WIFIKIT-214028-READ" # Lotto/TM = "AT+YZAPP=214028,READ"
gateways = ''
try:
# Send data to the broadcast address
sent = sock.sendto(SendData, ('<broadcast>', 48899))
# Look for responses from all recipients
while True:
try:
data, server = sock.recvfrom(1024)
except socket.timeout:
break
else:
if (data == SendData): continue #skip sent data
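                # Each responding logger is expected to answer with a
                # comma-separated "<ip>,<mac>,<serial>" string.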
a = data.split(',')
wifi_ip, wifi_mac, wifi_sn = a[0],a[1],a[2]
if (len(gateways)>1):
gateways = gateways+','
gateways = gateways+wifi_ip+','+wifi_sn
finally:
sock.close()
return gateways
| gpl-3.0 | 8,714,258,900,020,281,000 | 33.385417 | 115 | 0.62678 | false |
mvaled/sentry | tests/sentry/deletions/test_tagkey.py | 1 | 3690 | from __future__ import absolute_import
from sentry import tagstore
from sentry.tagstore.models import EventTag
from sentry.models import ScheduledDeletion
from sentry.tasks.deletion import run_deletion
from sentry.testutils import TestCase
class DeleteTagKeyTest(TestCase):
def test_simple(self):
team = self.create_team(name="test", slug="test")
project = self.create_project(teams=[team], name="test1", slug="test1")
group = self.create_group(project=project)
key = "foo"
value = "bar"
tk = tagstore.create_tag_key(
key=key, project_id=project.id, environment_id=self.environment.id
)
tv = tagstore.create_tag_value(
key=key, value=value, project_id=project.id, environment_id=self.environment.id
)
tagstore.create_group_tag_key(
key=key, group_id=group.id, project_id=project.id, environment_id=self.environment.id
)
tagstore.create_group_tag_value(
key=key,
value=value,
group_id=group.id,
project_id=project.id,
environment_id=self.environment.id,
)
tagstore.create_event_tags(
group_id=group.id,
project_id=project.id,
event_id=1,
environment_id=self.environment.id,
tags=[(tk.key, tv.value)],
)
project2 = self.create_project(teams=[team], name="test2")
env2 = self.create_environment(project=project2)
group2 = self.create_group(project=project2)
tk2 = tagstore.create_tag_key(project2.id, env2.id, key)
tv2 = tagstore.create_tag_value(
key=key, value=value, project_id=project2.id, environment_id=env2.id
)
tagstore.create_group_tag_key(
key=key, group_id=group2.id, project_id=project2.id, environment_id=env2.id
)
tagstore.create_group_tag_value(
key=key, value=value, group_id=group2.id, project_id=project2.id, environment_id=env2.id
)
tagstore.create_event_tags(
group_id=group2.id,
project_id=project2.id,
environment_id=env2.id,
event_id=1,
tags=[(tk2.key, tv2.value)],
)
deletion = ScheduledDeletion.schedule(tk, days=0)
deletion.update(in_progress=True)
with self.tasks():
run_deletion(deletion.id)
try:
tagstore.get_group_tag_value(
group.project_id, group.id, self.environment.id, key, value
)
assert False # verify exception thrown
except tagstore.GroupTagValueNotFound:
pass
try:
tagstore.get_group_tag_key(group.project_id, group.id, self.environment.id, key)
assert False # verify exception thrown
except tagstore.GroupTagKeyNotFound:
pass
try:
tagstore.get_tag_value(project.id, self.environment.id, key, value)
assert False # verify exception thrown
except tagstore.TagValueNotFound:
pass
try:
tagstore.get_tag_key(project.id, self.environment.id, key)
assert False # verify exception thrown
except tagstore.TagKeyNotFound:
pass
assert tagstore.get_tag_key(project2.id, env2.id, key) is not None
assert tagstore.get_group_tag_key(group2.project_id, group2.id, env2.id, key) is not None
assert (
tagstore.get_group_tag_value(group2.project_id, group2.id, env2.id, key, value)
is not None
)
assert EventTag.objects.filter(key_id=tk2.id).exists()
| bsd-3-clause | 1,916,412,189,685,323,800 | 37.041237 | 100 | 0.604607 | false |
jiaojianbupt/tools | project_manager/alias.py | 1 | 1746 | # -*- coding: utf-8 -*-
"""
Created by jiaojian at 2018/6/29 16:30
"""
import os
import sys
import termios
from tools.utils.basic_printer import print_with_style, ConsoleColor
HOME = os.environ['HOME']
def get_input():
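    # Read a single keypress without waiting for Enter: canonical mode and
    # echo are switched off on stdin, then restored after the read.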
fd = sys.stdin.fileno()
old_tty_info = termios.tcgetattr(fd)
new_tty_info = old_tty_info[:]
new_tty_info[3] &= ~termios.ICANON
new_tty_info[3] &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, new_tty_info)
answer = os.read(fd, 1)
termios.tcsetattr(fd, termios.TCSANOW, old_tty_info)
return answer
def add_alias():
if sys.platform == 'darwin':
bash_profile_name = '.bash_profile'
else:
bash_profile_name = '.bashrc'
linux_bash_profile_path = os.path.join(HOME, bash_profile_name)
exec_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'main.py')
alias = 'alias updateall="python %s"' % exec_file_path
if os.path.exists(linux_bash_profile_path):
        with open(linux_bash_profile_path, 'r') as bashrc_file:
bash_profile = bashrc_file.read()
if bash_profile.find(alias) >= 0:
return
answer = ''
while not answer or answer not in {'y', 'n'}:
print_with_style('Add \'%s\' to your %s?(y/n)' % (alias, bash_profile_name), color=ConsoleColor.YELLOW)
answer = get_input()
if answer == 'n':
return
elif answer == 'y':
break
bash_profile = bash_profile + '\n' + alias
with open(linux_bash_profile_path, 'w') as bashrc_file:
bashrc_file.write(bash_profile)
print_with_style('Alias added.', color=ConsoleColor.YELLOW)
| gpl-3.0 | -6,002,306,353,356,231 | 35.375 | 119 | 0.587056 | false |
amw2104/fireplace | setup.py | 1 | 1046 | #!/usr/bin/env python
import os.path
import fireplace
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), "README.md")).read()
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)"
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Games/Entertainment :: Simulation",
]
setup(
name="fireplace",
version=fireplace.__version__,
packages=find_packages(exclude="tests"),
package_data={"": ["CardDefs.xml"]},
include_package_data=True,
tests_require=["pytest"],
author=fireplace.__author__,
author_email=fireplace.__email__,
description="Pure-python Hearthstone re-implementation and simulator",
classifiers=CLASSIFIERS,
download_url="https://github.com/jleclanche/python-bna/tarball/master",
long_description=README,
license="AGPLv3",
url="https://github.com/jleclanche/fireplace",
)
| agpl-3.0 | -585,384,975,807,615,200 | 28.885714 | 85 | 0.720841 | false |
dapengchen123/code_v1 | reid/datasets/market1501.py | 1 | 3563 | from __future__ import print_function, absolute_import
import os.path as osp
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class Market1501(Dataset):
url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view'
md5 = '65005ab7d12ec1c44de4eeafe813e68a'
def __init__(self, root, split_id=0, num_val=0.3, download=False):
super(Market1501, self).__init__(root, split_id=split_id)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. " +
"You can use download=True to download it.")
self.load(num_val)
def download(self):
if self._check_integrity():
print("Files already downloaded and verified")
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
# Download the raw zip file
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please download the dataset manually from {} "
"to {}".format(self.url, fpath))
# Extract the file
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
# Format
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
# 1501 identities (+1 for background) with 6 camera views each
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')):
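            # Raw image names follow the Market-1501 convention, e.g.
            # "0002_c1s1_000451_03.jpg": the leading field is the person ID
            # and "c<N>" is the camera index (1-6).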
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, len(identities[pid][cam])))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
# Save meta information into a json file
meta = {'name': 'Market1501', 'shot': 'multiple', 'num_cameras': 6,
'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
# Save the only training / test split
splits = [{
'trainval': sorted(list(trainval_pids)),
'query': sorted(list(query_pids)),
'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
| mit | -2,535,048,846,858,501,600 | 36.505263 | 78 | 0.561605 | false |
glenflet/ZtoRGBpy | ZtoRGBpy/_info.py | 1 | 2082 | # -*- coding: utf-8 -*-
# =================================================================================
# Copyright 2019 Glen Fletcher <mail@glenfletcher.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All documentation this file as docstrings or comments are licensed under the
# Creative Commons Attribution-ShareAlike 4.0 International License; you may
# not use this documentation except in compliance with this License.
# You may obtain a copy of this License at
#
# https://creativecommons.org/licenses/by-sa/4.0
#
# =================================================================================
"""
ZtoRGB information definition module
Special private module used for automatic processing, and inclusion
.. moduleauthor:: Glen Fletcher <mail@glenfletcher.com>
"""
__authors__ = [
("Glen Fletcher", "mail@glenfletcher.com")]
__copyright__ = "2019 Glen Fletcher"
__license__ = """\
The source code for this package is licensed under the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0),
while the documentation including docstrings and comments embedded in the source code are licensed under the
[Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0)
"""
__contact__ = "Glen Fletcher <mail@glenfletcher.com>"
__version__ = "2.0"
__title__ = "ZtoRGBpy"
__desc__ = """\
Complex number to perceptually uniform RGB subset mapping library"""
__all__ = [
'__authors__', '__copyright__', '__license__',
'__contact__', '__version__', '__title__',
'__desc__']
| mit | -2,552,345,746,239,531,500 | 40.64 | 120 | 0.662344 | false |
ElecProg/decmath | decmath/trig.py | 1 | 4598 | from decimal import getcontext, Decimal
from decmath import _pi, _to_Decimal, sign
# Trigonometric functions
def acos(x):
"""Return the arc cosine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: acos accepts -1 <= x <= 1.")
elif x == -1:
return _pi()
elif x == 0:
return _pi() / 2
elif x == 1:
return Decimal(0)
getcontext().prec += 2
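    # acos(x) = pi/2 - asin(x): subtract successive terms of the arcsin
    # Maclaurin series from pi/2.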
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, _pi() / 2 - x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s -= coeff * num
getcontext().prec -= 2
return +s
def asin(x):
"""Return the arc sine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: asin accepts -1 <= x <= 1.")
elif x == -1:
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == 1:
return _pi() / 2
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s += coeff * num
getcontext().prec -= 2
return +s
def atan(x):
"""Return the arc tangent (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif x == Decimal('-Inf'):
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == Decimal('Inf'):
return _pi() / 2
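    # Range reduction: for |x| > 1 evaluate the series at 1/x and use
    # atan(x) = +/-pi/2 - atan(1/x); the offset c is applied after the loop.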
if x < -1:
c = _pi() / -2
x = 1 / x
elif x > 1:
c = _pi() / 2
x = 1 / x
else:
c = 0
getcontext().prec += 2
x_squared = x**2
y = x_squared / (1 + x_squared)
y_over_x = y / x
i, lasts, s, coeff, num = Decimal(0), 0, y_over_x, 1, y_over_x
while s != lasts:
lasts = s
i += 2
coeff *= i / (i + 1)
num *= y
s += coeff * num
if c:
s = c - s
getcontext().prec -= 2
return +s
def atan2(y, x):
"""Return the arc tangent (measured in radians) of y/x.
Unlike atan(y/x), the signs of both x and y are considered."""
y = _to_Decimal(y)
x = _to_Decimal(x)
abs_y = abs(y)
abs_x = abs(x)
y_is_real = abs_y != Decimal('Inf')
if y.is_nan() or x.is_nan():
return Decimal("NaN")
if x:
if y_is_real:
a = y and atan(y / x) or Decimal(0)
if x < 0:
a += sign(y) * _pi()
return a
elif abs_y == abs_x:
x = sign(x)
y = sign(y)
return _pi() * (Decimal(2) * abs(x) - x) / (Decimal(4) * y)
if y:
return atan(sign(y) * Decimal('Inf'))
elif sign(x) < 0:
return sign(y) * _pi()
else:
return sign(y) * Decimal(0)
def cos(x):
"""Return the cosine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2 or x == 3 * _pi() / 2:
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 0, 0, 1, 1, 1, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def hypot(x, y):
"""Return the Euclidean distance, sqrt(x*x + y*y)."""
return (_to_Decimal(x).__pow__(2) + _to_Decimal(y).__pow__(2)).sqrt()
def sin(x):
"""Return the sine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == 0 or x == _pi():
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def tan(x):
"""Return the tangent of x (measured in radians)."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2:
return Decimal('Inf')
elif x == 3 * _pi() / 2:
return Decimal('-Inf')
return sin(x) / cos(x)
| mit | -8,599,662,519,578,526,000 | 22.579487 | 73 | 0.45933 | false |
martindurant/astrobits | time_series.py | 1 | 12543 | """Take a list of files and known star coordinates, and
perform photometry on them all, either with apertures (phot)
or by PSF fitting (daophot, which required additional
parameters and is apropriate to poor S/N or crowded fields).
Makes extensive use of iraf tasks; set all photometry parameters
before running:
datapars - for data characteristics
centerpars - finding the reference star on each image.
centerpars, photpars, fitskypars - for controling aperture photometry
daopars - for controling daophot
filelist: set of image files, in IRAF syntax (image.fits[1][*,*,2] etc);
can be more than one per cube.
coords: name a file containing all star coords for photometry, based on
an image unshifted relative to (0,0) in the shifts list. Use pure numbers
for the phot method, .mag or .als for the daophot method.
shifts: name a file containing shifts, a tuple of shift arrays, image
header keywords (a tuple of two), or None for no shifts.
refstar: coords of star for deriving the (x,y) offset, as in coords
timestamp: source of the timing information: a header keyword, delta-t
for uniform sampling, or a file with times (in whatever format you'll be
using later).
psf: whether to use daophot or aperture phot for analysis. If this is a
filename, that is the PSF profile to use for every image; if it is "True",
make a new PSF for every image. Pars below only for full PSF fitting
pststars: a .pst file from daophot, listing the IDs of stars for making
the PSF for each image. NB: DAOphot refuses to measure any star with SNR<2.
ids: which stars are interesting, by ID (in input coord list order)
coords: starting well-measured coords (pdump-ed from a .als, perhaps).
"""
import os
import numpy
from glob import glob
import pyfits
from pylab import find
from numpy import load,vstack,save,median
thisdir = os.getcwd()
os.chdir("/home/durant")
from pyraf import iraf
iraf.cd(thisdir)
iraf.digiphot()
iraf.daophot()
import pyraf
import pyfits
import numpy as n
def shift_file_coords(filename,xshift,yshift,output,sort=None):
"""Understands filetypes: 2-column ascii numbers, .mag, .als, .pst.
NB: shift means where each image is, relative to the original (not where
it should be moved to).
"""
if not(sort):
sort = 'num'
if filename.find('.mag')>0: sort = 'mag'
if filename.find('.als')>0: sort = 'als'
if filename.find('.pst')>0: sort = 'pst'
if not(sort=='num' or sort=='mag' or sort=='als' or sort=='pst'):
raise ValueError('Unknown input filetype: %s'%filename)
if sort=='num': # shift 2-column numeric ASCII table
x,y = load(filename,usecols=[0,1],unpack=True)
x += xshift
y += yshift
X = vstack((x,y))
save(output,X.transpose())
return
if sort=='mag': #shift a .mag photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==9 and line[0]!='#':
x = float(line.split()[0]) + xshift
y = float(line.split()[1]) + yshift
line = "%-14.3f %-11.3f"%(x,y)+line[21:]
freda.write(line)
if sort=='als': #shift a .als DAOphot photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==8 and line[0]!='#':
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
if sort=='pst': #shift a PSF star list for DAOphot
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line[0]!="#":
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
fred.close()
freda.close()
def recentre(image,refcoordfile):
"""Returns improved shift by centroiding
on the reference star using phot. This can be VERY
sensitive to the parameters in centerpars."""
xin,yin = load(refcoordfile,unpack=True)
try:
iraf.phot(image,refcoordfile,'temp.mag',inter="no",calgorithm='centroid',
mode='h',verify='no',update='no',verbose='no')
xout,yout=iraf.pdump('temp.mag','xcen,ycen','yes',Stdout=1)[0].split()
except:
print "Recentring failed on", image
return 0.,0.
xout,yout = float(xout),float(yout)
return xout-xin,yout-yin
vary_par = 1.
vary_max = 10
vary_min = 6
vary_fwhm= 0
def setaperture(image,refstar):
"""Measure the FWHM of the reference star unsing simple DAOphot editor
and then set the photometry aperture to this number"""
x,y = load(refstar,unpack=True)
fred = open('tempaperfile','w')
fred.write("%f %f 100 a\nq"%(x,y))
fred.close()
try:
output=iraf.daoedit(image,icomm='tempaperfile',Stdout=1,Stderr=1)
except:
print "Aperture setting failed on",image
return
FWHM = float(output[3].split()[4])
iraf.photpars.apertures = min(max(FWHM*vary_par,vary_min),vary_max)
iraf.daopars.fitrad = min(max(FWHM*vary_par,vary_min),vary_max)
global vary_fwhm
vary_fwhm = FWHM
print "FWHM: ", FWHM, " aperture: ",iraf.photpars.apertures
def apphot(image,coords,refstar=None,centre=False,vary=False):
"""Apperture photometry with centering based on a reference star.
NB: centre refers to shifting the coordinates by centroiding on the
reference star; recentering on the final phot depends on
centerpars.calgorithm ."""
iraf.dele('temp.mag*')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine centring: ", xsh,ysh
else: #no recentreing by reference star (but could still have calgorithm!=none)
xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords')
iraf.phot(image,'tempcoords','temp.mag2',inter="no",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.mag2','id,flux,msky,stdev','yes',Stdout=1)
return out
def psfphot(image,coords,pststars,refstar,centre=True,vary=False):
"""PSF photometry. Centering is through phot on refstar.
Assume coords is a .als file for now. Recentering is always done
for the reference star, never for the targets."""
iraf.dele('temp.mag*')
iraf.dele('temp.psf.fits')
iraf.dele('temp.als')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
shift_file_coords(pststars,xsh,ysh,'temppst2',sort='pst')
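    # DAOphot sequence: phot for initial magnitudes, psf to build the model
    # from the PST star list, then allstar to fit every star with that PSF.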
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.psf(image,'temp.mag2','temppst2','temp.psf','temp.mag.pst','temp.mag.psg',
inter='no',mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2','temp.psf','temp.als','temp.mag.arj',"default",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def simplepsfphot(image,coords,psf,refstar,centre=True,vary=False):
"""PSF photometry, with a given PSF file in psf used for every image"""
iraf.dele('temp.mag*')
iraf.dele('temp.als')
iraf.dele('temp.sub.fits')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2',psf,'temp.als','temp.mag.arj','temp.sub.fits',
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def custom1(filename): # for NACO timing mode cubes - removes horizontal banding
#iraf.imarith(filename,'-','dark','temp')
iraf.imarith(filename,'/','flatK','temp')
im = pyfits.getdata('temp.fits')
med = median(im.transpose())
out = ((im).transpose()-med).transpose()
(pyfits.ImageHDU(out)).writeto("temp2.fits",clobber=True)
iraf.imdel('temp')
iraf.imcopy('temp2[1]','temp')
def get_id(starid,output='output'):
"""from the output of the photometry, grab the magnitudes and magerrs of starid"""
mag = load(output,usecols=[4+starid*4])
merr= load(output,usecols=[5+starid*4])
return mag,merr
def run(filelist,coords,refstar,shifts=None,centre=False,psf=False,pststars=None,
ids=None,dark=0,flat=1,timestamp="TIME",output='output',custom_process=None,
vary=False):
"""If psf==True, must include all extra par files.
    If PSF is a filename (.psf.fits), this profile is used to fit every image.
Timestamp can be either a file of times (same length as filelist), a header
keyword, or an array of times.
The input list can include [] notation for multiple extensions or sections
of each file (incompatible with header-based time-stamps).
custom_process(file) is a function taking a filename (possible including [x]
syntax) and places a processed image in temp.fits."""
output = open(output,'w')
x = load(coords,usecols=[1])
numstars = len(x)
myfiles = open(filelist).readlines()
myfiles = [myfiles[i][:-1] for i in range(len(myfiles))]
if timestamp.__class__ == numpy.ndarray: #--sort out times--
times = 1 #times=1 means we know the times beforehand
elif len(glob(timestamp))>0:
timestamp = load(timestamp,usecols=[0])
times=1
else:
        times=0 #times=0 means find the time from each image
if type(shifts)==type(" "): #--sort out shifts--
xshifts,yshifts = load(shifts,unpack=True)#filename give, assuming 2 columns
xshifts,yshifts = -xshifts,-yshifts #these are in the opposite sense to coords from stack
elif n.iterable(shifts):
xshifts=n.array(shifts[0]) #for shifts given as arrays/lists
yshifts=n.array(shifts[1])
else:
print "No shifts" #assume all shifts are zero
xshifts = n.zeros(len(myfiles))
yshifts = n.zeros(len(myfiles))
for i,thisfile in enumerate(myfiles): #run!
print i,thisfile
if times:
time = timestamp[i] #known time
else:
time = pyfits.getval(thisfile,timestamp) #FITS keyword
try:
iraf.dele('temp.fits')
if custom_process: #arbitrary subroutine to process a file -> temp.fits
custom_process(thisfile)
else: #typical dark/bias subtract and flatfield
iraf.imarith(thisfile,'-',dark,'temp')
iraf.imarith('temp','/',flat,'temp')
shift_file_coords(coords,xshifts[i],yshifts[i],'tempcoords') #apply coarse shifts
shift_file_coords(refstar,xshifts[i],yshifts[i],'tempref',sort='num')
if psf:
if psf is True: #full PSF fit
shift_file_coords(pststars,xshifts[i],yshifts[i],'temppst')
out=psfphot('temp.fits','tempcoords','temppst','tempref',centre,vary)
else: #DAOphot with known PSF
out=simplepsfphot('temp.fits','tempcoords',psf,'tempref',centre,vary)
else: #aperture photometry
out=apphot('temp.fits','tempcoords','tempref',centre,vary=vary)
output.write("%s %s %s "%(thisfile,time,vary_fwhm))
myids = n.array([int(out[i].split()[0]) for i in range(len(out))])
for i in ids or range(numstars):
try: #search for each requested ID
foundid = find(myids==i)[0]
output.write(out[foundid]+" ")
except: #ID not found
output.write(" 0 0 0 0 ")
output.write("\n")
except KeyboardInterrupt: #exit on Ctrl-C
break
except pyraf.irafglobals.IrafError, err:
print "IRAF error ",err,thisfile
break
except ValueError, err:
print "Value error ",err,thisfile
raise
output.close()
#iraf.dele('temp*')
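# Illustrative usage sketch (not part of the original module). The file names,
# FITS keyword and star ID below are hypothetical; they only show how the
# pieces above are meant to fit together.
#
#   xs, ys = load('coarse_shifts.txt', unpack=True)   # per-image coarse shifts
#   run('images.lis',                 # list of reduced frames, one per line
#       'stack.als',                  # master coordinate list (.als format)
#       'refstar.coo',                # single reference star used for recentring
#       shifts=(xs, ys),
#       centre=True,                  # fine recentring on the reference star
#       psf='master.psf.fits',        # fixed PSF -> simplepsfphot() branch
#       timestamp='MJD-OBS',          # FITS keyword holding the time stamp
#       output='lightcurve.dat')
#   mag, merr = get_id(3, output='lightcurve.dat')     # light curve of star 3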
| mit | -5,665,480,579,238,870,000 | 42.251724 | 97 | 0.63358 | false |
dunkhong/grr | grr/server/grr_response_server/databases/db_yara_test_lib.py | 1 | 1573 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestYaraMixin(object):
"""A mixin class for testing YARA methods of database implementations."""
def testWriteYaraSignatureReferenceIncorrectUsername(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
with self.assertRaises(db.UnknownGRRUserError) as context:
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux")
self.assertEqual(context.exception.username, "quux")
def testWriteYaraSignatureReferenceDuplicated(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
# Writing duplicated signatures is possible, it should not raise.
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
def testVerifyYaraSignatureReferenceSimple(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))
def testVerifyYaraSignatureReferenceIncorrect(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
| apache-2.0 | 6,214,949,513,647,433,000 | 33.195652 | 75 | 0.760331 | false |
tensorflow/datasets | tensorflow_datasets/text/tiny_shakespeare_test.py | 1 | 1291 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tiny Shakespeare dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import tiny_shakespeare
class TinyShakespeareTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = tiny_shakespeare.TinyShakespeare
SPLITS = {
"train": 1,
"validation": 1,
"test": 1,
}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {'some_key': 'output_file1.txt', ...}
if __name__ == "__main__":
testing.test_main()
| apache-2.0 | -3,372,135,436,074,852,000 | 32.973684 | 74 | 0.719597 | false |
aboyett/blockdiag | src/blockdiag/plugins/autoclass.py | 1 | 1130 | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from blockdiag import plugins
class AutoClass(plugins.NodeHandler):
def on_created(self, node):
if node.id is None:
return
for name, klass in self.diagram.classes.items():
pattern = "_%s$" % re.escape(name)
if re.search(pattern, node.id):
node.label = re.sub(pattern, '', node.id)
node.set_attributes(klass.attrs)
def setup(self, diagram, **kwargs):
plugins.install_node_handler(AutoClass(diagram, **kwargs))
| apache-2.0 | 6,593,639,296,591,402,000 | 32.235294 | 75 | 0.676106 | false |
Hellowlol/PyTunes | modules/newznab.py | 1 | 9273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import pytunes
import math
from pytunes.proxy import get_image
from urllib2 import urlopen, quote
from json import loads
import logging
class Newznab:
def __init__(self):
self.logger = logging.getLogger('modules.newznab')
pytunes.MODULES.append({
'name': 'Newznab Search',
'id': 'newznab',
'fields': [
{'type':'bool', 'label':'Enable', 'name':'newznab_enable'},
{'type':'text', 'label':'Menu name', 'name':'newznab_name', 'placeholder':''},
#{'type':'select',
# 'label':'Default NZB Client',
# 'name':'default_nzb_id',
# 'options':[],
# 'desc':'Only Enabled Clients Will Show'
#},
{'type':'text', 'label':'Console Category', 'name':'newznab_console', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Movies Category', 'name':'newznab_movies', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Audio Category', 'name':'newznab_audio', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'PC Category', 'name':'newznab_pc', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'TV Category', 'name':'newznab_tv', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'XXX Category', 'name':'newznab_xxx', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Books Category', 'name':'newznab_books', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Other Category', 'name':'newznab_other', 'desc':'From Sabnzbd Configuration'}
]})
pytunes.MODULES.append({
'name': 'Newznab Servers',
'id': 'newznab_update_server',
'action': '%ssettings/setnewzserver' % pytunes.WEBDIR,
#'test': pytunes.WEBDIR + 'newznab/ping',
'fields': [
{'type':'select',
'label':'Newznab Servers',
'name':'newznab_server_id',
'options':[
{'name':'New', 'value':0}
]},
{'type':'text',
'label':'Name',
'name':'newznab_server_name'},
{'type':'text', 'label':'Host', 'name':'newznab_server_host'},
{'type':'text', 'label':'Apikey', 'name':'newznab_server_apikey'},
{'type':'bool', 'label':'Use SSL', 'name':'newznab_server_ssl'}
]})
@cherrypy.expose()
def index(self, query='', **kwargs):
return pytunes.LOOKUP.get_template('newznab.html').render(query=query, scriptname='newznab')
"""
    NOT IMPLEMENTED
@cherrypy.expose()
@cherrypy.tools.json_out()
def ping(self, newznab_host, newznab_apikey, **kwargs):
self.logger.debug("Pinging newznab-host")
return 1
"""
@cherrypy.expose()
def thumb(self, url, h=None, w=None, o=100):
if url.startswith('rageid'):
settings = pytunes.settings
host = settings.get('newznab_host', '').replace('http://', '').replace('https://', '')
ssl = 's' if settings.get('newznab_ssl', 0) else ''
url = 'http%s://%s/covers/tv/%s.jpg' % (ssl, host, url[6:])
return get_image(url, h, w, o)
@cherrypy.expose()
def getcategories(self, **kwargs):
self.logger.debug("Fetching available categories")
ret = ''
try:
settings = pytunes.settings
self.current = settings.get_current_newznab_host()
host = self.current.host.replace('http://', '').replace('https://', '')
ssl = '' if self.current.ssl == '0' else 's'
apikey = self.current.apikey
url = 'http%s://%s/api?t=caps&o=xml' % (ssl, host)
self.logger.debug("Fetching Cat information from: %s" % url)
caps = urlopen(url, timeout=10).read()
lines = caps.split('\n')
opt_line = '<option value="%s">%s</option>'
for line in lines:
if 'category' in line and 'genre' not in line and not '/cat' in line:
junk,id,name = line.strip().split(' ')
id = id.split('"')[1]
main_name = name.split('"')[1]
ret += opt_line % (id, main_name)
if 'subcat' in line:
subcat = line.strip().split(' name')
id = subcat[0].split('"')[1]
name = '%s > %s' % (main_name, subcat[1].split('"')[1])
ret += opt_line % (id, name)
except:
self.logger.error('Unable to fetch categories from: %s' % url)
return ret
@cherrypy.expose()
def search(self, q='', cat='', **kwargs):
ret = ''
row = '<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>'
settings = pytunes.settings
sab_cat = {
'1000':settings.get('newznab_console', ''),
'2000':settings.get('newznab_movies', ''),
'3000':settings.get('newznab_audio', ''),
'4000':settings.get('newznab_pc', ''),
'5000':settings.get('newznab_tv', ''),
'6000':settings.get('newznab_xxx', ''),
'7000':settings.get('newznab_books', ''),
'8000':settings.get('newznab_other', '')
}
if cat:
cat = '&cat=%s' % cat
res = self.fetch('search&q=%s%s&extended=1' % (quote(q), cat))
#put in staticvars
link = "<a href='/newznab/AddNzbFromUrl?nzb_url=%s&nzb_category=%s' class='ajax-link' title='Download' cat='%s'><i class='icon-download-alt'></i></a>"
try:
results = res['channel']['item']
except:
results = res
grabs = '0'
for each in results:
files = str(each['attr'][4]['@attributes']['value'])
grabs = str(each['attr'][6]['@attributes']['value'])
category = each['category']
title = each['title']
cat = sab_cat[str(each['attr'][0]['@attributes']['value'])]
num = int(each['enclosure']['@attributes']['length'])
for x in [' bytes',' KB',' MB',' GB']:
if num < 1024.0:
size = "%3.2f%s" % (num, x)
break
num /= 1024.0
dl = link % (quote(each['link']), cat, cat)
ret += row % (title, category, size, files, grabs, dl)
return ret
@cherrypy.expose()
@cherrypy.tools.json_out()
def AddNzbFromUrl(self, nzb_url, nzb_category=''):
self.logger.debug("Adding nzb from url")
if nzb_category:
nzb_category = '&cat=%s' % nzb_category
return self.send('&mode=addurl&name=%s%s' % (quote(nzb_url), nzb_category))
def fetch(self, cmd):
try:
settings = pytunes.settings
self.current = settings.get_current_newznab_host()
host = self.current.host.replace('http://', '').replace('https://', '')
ssl = 's' if settings.get('newznab_ssl') == 'on' else ''
apikey = self.current.apikey
            url = 'http%s://%s/api?o=json&apikey=%s&t=%s' % (ssl, host, apikey, cmd)
self.logger.debug("Fetching information from: %s" % url)
return loads(urlopen(url, timeout=30).read())
except Exception, e:
self.logger.debug("Exception%s: " % str(e))
self.logger.error("Unable to fetch information from: newznab %s" % str(e))
def send(self, link):
try:
host = pytunes.settings.get('sabnzbd_host', '')
port = str(pytunes.settings.get('sabnzbd_port', ''))
apikey = pytunes.settings.get('sabnzbd_apikey', '')
sabnzbd_basepath = pytunes.settings.get('sabnzbd_basepath', '/sabnzbd/')
ssl = 's' if pytunes.settings.get('sabnzbd_ssl', 0) else ''
if(sabnzbd_basepath == ""):
sabnzbd_basepath = "/sabnzbd/"
if not(sabnzbd_basepath.endswith('/')):
sabnzbd_basepath += "/"
url = 'http%s://%s:%s%sapi?output=json&apikey=%s%s' % (ssl, host, port, sabnzbd_basepath, apikey, link)
self.logger.debug("Sending NZB to: %s: " % url)
return loads(urlopen(url, timeout=10).read())
except:
self.logger.error("Cannot contact sabnzbd")
return
#Future use...use staticvars
@cherrypy.expose()
def GetClients(self):
nzbclients = ''
if pytunes.settings.get('nzbget_enable', ''):
nzbclients += '<option id="nzbget">NZBget</option>'
if pytunes.settings.get('sabnzbd_enable', ''):
nzbclients += '<option id="sabnzbd">Sabnzbd+</option>'
if not nzbclients:
nzbclients = '<option>No Clients Enabled</option>'
return nzbclients
| gpl-3.0 | -824,705,614,698,123,100 | 43.234146 | 158 | 0.502211 | false |
freelawproject/recap-server | settings.py | 1 | 1377 | """Settings are derived by compiling any files ending in .py in the settings
directory, in alphabetical order.
This results in the following concept:
- default settings are in 10-public.py (this should contain most settings)
- custom settings are in 05-private.py (an example of this file is here for
you)
- any overrides to public settings can go in 20-private.py (you'll need to
create this)
"""
from __future__ import with_statement
import os
import glob
import sys
def _generate_secret_key(file_path):
import random
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
def random_char():
return chars[int(len(chars)*random.random())]
rand_str = ''.join(random_char() for i in range(64))
with open(file_path, 'w') as f:
f.write('SECRET_KEY=%s\n' % repr(rand_str))
ROOT_PATH = os.path.dirname(__file__)
# Try importing the SECRET_KEY from the file secret_key.py. If it doesn't exist,
# there is an import error, and the key is generated and written to the file.
try:
from secret_key import SECRET_KEY
except ImportError:
_generate_secret_key(os.path.join(ROOT_PATH, 'secret_key.py'))
from secret_key import SECRET_KEY
# Load the conf files.
conf_files = glob.glob(os.path.join(
os.path.dirname(__file__), 'settings', '*.py'))
conf_files.sort()
for f in conf_files:
execfile(os.path.abspath(f))
| gpl-3.0 | 8,784,527,857,870,266,000 | 31.023256 | 80 | 0.697168 | false |
mxamin/youtube-dl | youtube_dl/extractor/criterion.py | 1 | 1284 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CriterionIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?criterion\.com/films/(?P<id>[0-9]+)-.+'
_TEST = {
'url': 'http://www.criterion.com/films/184-le-samourai',
'md5': 'bc51beba55685509883a9a7830919ec3',
'info_dict': {
'id': '184',
'ext': 'mp4',
            'title': 'Le Samouraï',
'description': 'md5:a2b4b116326558149bef81f76dcbb93f',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
return {
'id': video_id,
'url': final_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| unlicense | -7,290,849,255,959,012,000 | 30.292683 | 77 | 0.535464 | false |
qedsoftware/commcare-hq | custom/opm/constants.py | 1 | 1732 | from corehq.apps.fixtures.models import FixtureDataItem
from corehq.util.quickcache import quickcache
DOMAIN = 'opm'
PREG_REG_XMLNS = "http://openrosa.org/formdesigner/D127C457-3E15-4F5E-88C3-98CD1722C625"
VHND_XMLNS = "http://openrosa.org/formdesigner/ff5de10d75afda15cddb3b00a0b1e21d33a50d59"
BIRTH_PREP_XMLNS = "http://openrosa.org/formdesigner/50378991-FEC3-408D-B4A5-A264F3B52184"
DELIVERY_XMLNS = "http://openrosa.org/formdesigner/492F8F0E-EE7D-4B28-B890-7CDA5F137194"
CHILD_FOLLOWUP_XMLNS = "http://openrosa.org/formdesigner/C90C2C1F-3B34-47F3-B3A3-061EAAC1A601"
CFU1_XMLNS = "http://openrosa.org/formdesigner/d642dd328514f2af92c093d414d63e5b2670b9c"
CFU2_XMLNS = "http://openrosa.org/formdesigner/9ef423bba8595a99976f0bc9532617841253a7fa"
CFU3_XMLNS = "http://openrosa.org/formdesigner/f15b9f8fb92e2552b1885897ece257609ed16649"
GROWTH_MONITORING_XMLNS = "http://openrosa.org/formdesigner/F1356F3F-C695-491F-9277-7F9B5522200C"
CLOSE_FORM = "http://openrosa.org/formdesigner/41A1B3E0-C1A4-41EA-AE90-71A328F0D8FD"
CHILDREN_FORMS = [CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS, CHILD_FOLLOWUP_XMLNS]
OPM_XMLNSs = [PREG_REG_XMLNS, VHND_XMLNS, BIRTH_PREP_XMLNS, DELIVERY_XMLNS,
CHILD_FOLLOWUP_XMLNS, CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS,
GROWTH_MONITORING_XMLNS, CLOSE_FORM]
# TODO Move these to a cached fixtures lookup
MONTH_AMT = 250
TWO_YEAR_AMT = 2000
THREE_YEAR_AMT = 3000
@quickcache([], timeout=30 * 60)
def get_fixture_data():
fixtures = FixtureDataItem.get_indexed_items(DOMAIN, 'condition_amounts', 'condition')
return dict((k, int(fixture['rs_amount'])) for k, fixture in fixtures.items())
class InvalidRow(Exception):
"""
Raise this in the row constructor to skip row
"""
| bsd-3-clause | 7,714,274,633,423,886,000 | 44.578947 | 96 | 0.769053 | false |
tonioo/modoboa | modoboa/lib/u2u_decode.py | 1 | 2282 | # -*- coding: utf-8 -*-
"""
Unstructured rfc2047 header to unicode.
A stupid (and not accurate) answer to https://bugs.python.org/issue1079.
"""
from __future__ import unicode_literals
import re
from email.header import decode_header, make_header
from email.utils import parseaddr
from django.utils.encoding import smart_text
# check spaces between encoded_words (and strip them)
sre = re.compile(r"\?=[ \t]+=\?")
# re pat for MIME encoded_word (without trailing spaces)
mre = re.compile(r"=\?[^?]*?\?[bq]\?[^?\t]*?\?=", re.I)
# re to detect encoded ASCII characters
ascii_re = re.compile(r"=[\dA-F]{2,3}", re.I)
def clean_spaces(m):
"""Replace unencoded spaces in string.
:param str m: a match object
:return: the cleaned string
"""
return m.group(0).replace(" ", "=20")
def clean_non_printable_char(m):
"""Strip non printable characters."""
code = int(m.group(0)[1:], 16)
if code < 20:
return ""
return m.group(0)
def decode_mime(m):
"""Substitute matching encoded_word with unicode equiv."""
h = decode_header(clean_spaces(m))
try:
u = smart_text(make_header(h))
except (LookupError, UnicodeDecodeError):
return m.group(0)
return u
def clean_header(header):
"""Clean header function."""
header = "".join(header.splitlines())
header = sre.sub("?==?", header)
return ascii_re.sub(clean_non_printable_char, header)
def u2u_decode(s):
"""utility function for (final) decoding of mime header
note: resulting string is in one line (no \n within)
note2: spaces between enc_words are stripped (see RFC2047)
"""
return mre.sub(decode_mime, clean_header(s)).strip(" \r\t\n")
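# Illustrative sketch (not part of the original module); the header values are
# hypothetical. u2u_decode() joins folded lines, strips the whitespace between
# encoded-words and returns a decoded unicode string, while decode_address()
# below only decodes the display-name part of an address:
#
#   u2u_decode("=?utf-8?q?Tr=C3=A8s?=\n =?utf-8?q?_urgent?=")
#   # -> u"Très urgent"
#   decode_address('"=?utf-8?q?Jos=C3=A9?=" <jose@example.com>')
#   # -> (u"José", "jose@example.com")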
def decode_address(value):
"""Special function for address decoding.
    We need dedicated processing because RFC1342 explicitly says
address MUST NOT contain encoded-word:
These are the ONLY locations where an encoded-word may appear. In
particular, an encoded-word MUST NOT appear in any portion of an
"address". In addition, an encoded-word MUST NOT be used in a
Received header field.
"""
phrase, address = parseaddr(clean_header(value))
if phrase:
phrase = mre.sub(decode_mime, phrase)
return phrase, address
| isc | -962,520,203,660,710,000 | 26.493976 | 72 | 0.660824 | false |
tudarmstadt-lt/topicrawler | lt.lm/src/main/py/mr_ngram_count.py | 1 | 1297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test:
cat data | map | sort | reduce
cat data | ./x.py -m | sort | ./x.py -r
hadoop jar /opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar \
-files x.py \
-mapper 'x.py -m' \
-reducer 'x.py -r' \
-input in \
-output out
@author: stevo
"""
from __future__ import print_function
from __future__ import division
import itertools as it
import sys
def readlines():
with sys.stdin as f:
for line in f:
if line.strip():
yield line
def mapper(lines):
for line in lines:
print('{}'.format(line.rstrip()))
def line2tuple(lines):
for line in lines:
splits = line.rstrip().split('\t')
yield splits
def reducer(lines, mincount=1):
for key, values in it.groupby(lines, lambda line : line.rstrip()):
num = reduce(lambda x, y: x + 1, values, 0)
if num >= mincount:
print('{}\t{}'.format(key, num))
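# Illustrative walk-through (not part of the original script); the lines are
# made up. With mincount=2 the reducer collapses identical, pre-sorted lines
# into "<line>\t<count>" records and drops anything seen fewer than 2 times:
#
#   stdin:  "the cat", "the cat", "the dog"
#   stdout: "the cat\t2"            ("the dog" is dropped: 1 < mincount)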
if len(sys.argv) < 2:
raise Exception('specify mapper (-m) or reducer (-r) function')
t = sys.argv[1]
mincount = int(sys.argv[2]) if len(sys.argv) > 2 else 1
if '-m' == t:
mapper(readlines());
elif '-r' == t:
reducer(readlines(), mincount);
else:
raise Exception('specify mapper (-m) or reducer (-r) function') | apache-2.0 | -1,830,261,497,265,860,000 | 22.6 | 80 | 0.597533 | false |
Ziqi-Li/bknqgis | bokeh/bokeh/sphinxext/example_handler.py | 1 | 2905 | import sys
from ..application.handlers.code_runner import CodeRunner
from ..application.handlers.handler import Handler
from ..io import set_curdoc, curdoc
class ExampleHandler(Handler):
""" A stripped-down handler similar to CodeHandler but that does
some appropriate monkeypatching to
"""
_output_funcs = ['output_notebook', 'output_file', 'reset_output']
_io_funcs = ['show', 'save']
def __init__(self, source, filename):
super(ExampleHandler, self).__init__(self)
self._runner = CodeRunner(source, filename, [])
def modify_document(self, doc):
if self.failed:
return
module = self._runner.new_module()
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
set_curdoc(doc)
old_io, old_doc = self._monkeypatch()
try:
self._runner.run(module, lambda: None)
finally:
self._unmonkeypatch(old_io, old_doc)
set_curdoc(old_doc)
def _monkeypatch(self):
def _pass(*args, **kw): pass
def _add_root(obj, *args, **kw):
from bokeh.io import curdoc
curdoc().add_root(obj)
def _curdoc(*args, **kw):
return curdoc()
# these functions are transitively imported from io into plotting,
# so we have to patch them all. Assumption is that no other patching
# has occurred, i.e. we can just save the funcs being patched once,
# from io, and use those as the originals to replace everywhere
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
old_io = {}
for f in self._output_funcs + self._io_funcs:
old_io[f] = getattr(io, f)
for mod in mods:
for f in self._output_funcs:
setattr(mod, f, _pass)
for f in self._io_funcs:
setattr(mod, f, _add_root)
import bokeh.document as d
old_doc = d.Document
d.Document = _curdoc
return old_io, old_doc
def _unmonkeypatch(self, old_io, old_doc):
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
for mod in mods:
for f in old_io:
setattr(mod, f, old_io[f])
import bokeh.document as d
d.Document = old_doc
@property
def failed(self):
return self._runner.failed
@property
def error(self):
return self._runner.error
@property
def error_detail(self):
return self._runner.error_detail
| gpl-2.0 | -5,235,527,630,608,026,000 | 27.203883 | 87 | 0.578313 | false |
BurningNetel/ctf-manager | CTFmanager/tests/views/event/test_event.py | 1 | 6138 | import json
from django.core.urlresolvers import reverse
from CTFmanager.tests.views.base import ViewTestCase
class EventPageAJAXJoinEventTest(ViewTestCase):
""" Tests that a user can join an event
    A user should be able to join upcoming events
    and get a response without the page reloading.
"""
def get_valid_event_join_post(self):
event = self.create_event()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
return _json, event
def test_POST_returns_expected_json_on_valid_post(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(200, _json['status_code'])
def test_POST_gives_correct_user_count(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(1, _json['members'])
def test_logout_POST_gives_401_and_negative(self):
self.client.logout()
_json, event = self.get_valid_event_join_post()
self.assertEqual(-1, _json['members'])
self.assertEqual(401, _json['status_code'])
def test_duplicate_POST_gives_304_and_negative(self):
_json, event = self.get_valid_event_join_post()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(-1, _json['members'])
self.assertEqual(304, _json['status_code'])
def test_valid_DELETE_gives_valid_json(self):
event = self.create_event_join_user()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(200, _json['status_code'])
self.assertEqual(0, _json['members'])
def test_duplicate_DELETE_gives_304_and_negative(self):
event = self.create_event_join_user()
self.client.delete(reverse('event_join', args=[event.name]))
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(304, _json['status_code'])
self.assertEqual(-1, _json['members'])
    def test_logout_then_DELETE_gives_401_and_negative(self):
event = self.create_event_join_user()
self.client.logout()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(401, _json['status_code'])
self.assertEqual(-1, _json['members'])
def create_event_join_user(self):
event = self.create_event()
event.join(self.user)
return event
class EventPageTest(ViewTestCase):
def test_events_page_requires_authentication(self):
self.client.logout()
response = self.client.get(reverse('events'))
self.assertRedirects(response, reverse('login') + '?next=' + reverse('events'))
def test_events_page_renders_events_template(self):
response = self.client.get(reverse('events'))
self.assertTemplateUsed(response, 'event/events.html')
def test_events_page_contains_new_event_button(self):
response = self.client.get(reverse('events'))
expected = 'id="btn_add_event" href="/events/new/">Add Event</a>'
self.assertContains(response, expected)
def test_events_page_displays_only_upcoming_events(self):
event_future = self.create_event("hatCTF", True)
event_past = self.create_event("RuCTF_2015", False)
response = self.client.get(reverse('events'))
_event = response.context['events']
self.assertEqual(_event[0], event_future)
self.assertEqual(len(_event), 1)
self.assertNotEqual(_event[0], event_past)
def test_events_page_has_correct_headers(self):
response = self.client.get(reverse('events'))
expected = 'Upcoming Events'
expected2 = 'Archive'
self.assertContains(response, expected)
self.assertContains(response, expected2)
def test_empty_events_set_shows_correct_message(self):
response = self.client.get(reverse('events'))
expected = 'No upcoming events!'
self.assertContains(response, expected)
def test_events_page_display_archive(self):
event_past = self.create_event('past_event', False)
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertContains(response, '<table id="table_archive"')
self.assertContains(response, event_past.name)
self.assertEqual(archive[0], event_past)
def test_events_page_displays_error_message_when_nothing_in_archive(self):
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertEqual(len(archive), 0)
self.assertContains(response, 'No past events!')
def test_event_page_displays_event_members_count(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, '0 Participating')
event.members.add(self.user)
event.save()
response = self.client.get(reverse('events'))
self.assertContains(response, '1 Participating')
def test_event_page_displays_correct_button_text(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, 'Join</button>')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, 'Leave</button>')
def test_event_page_shows_username_in_popup(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 1)
self.assertContains(response, 'Nobody has joined yet!')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 2)
self.assertNotContains(response, 'Nobody has joined yet!') | gpl-3.0 | -6,477,876,122,721,076,000 | 38.352564 | 87 | 0.654774 | false |
jeffmurphy/cif-router | poc/cif-router.py | 1 | 21349 | #!/usr/bin/python
#
#
# cif-router proof of concept
#
# cif-router [-p pubport] [-r routerport] [-m myname] [-h]
# -p default: 5556
# -r default: 5555
# -m default: cif-router
#
# cif-router is a zmq device with the following sockets:
# XPUB
# for republishing messages
# XSUB
# for subscribing to message feeds
# ROUTER
# for routing REQ/REP messages between clients
# also for accepting REQs from clients
# locally accepted types:
# REGISTER, UNREGISTER, LIST-CLIENTS
# locally generated replies:
# UNAUTHORIZED, OK, FAILED
#
# communication between router and clients is via CIF.msg passing
# the 'ControlStruct' portion of CIF.msg is used for communication
#
# a typical use case:
#
# cif-smrt's REQ connects to ROUTER and sends a REGISTER message with dst=cif-router
# cif-router's ROUTER responds with SUCCESS (if valid) or UNAUTHORIZED (if not valid)
# the apikey will be validated during this step
# cif-router's XSUB connects to cif-smrt's XPUB
# cif-smrt begins publishing CIF messages
# cif-router re-publishes the CIF messages to clients connected to cif-router's XPUB
# clients may be: cif-correlator, cif-db
import sys
import zmq
import time
import datetime
import threading
import getopt
import json
import pprint
import struct
import hashlib  # used by make_msg_seq() below
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import control_pb2
import cifsupport
sys.path.append('../../libcif/lib')
from CIF.RouterStats import *
from CIF.CtrlCommands.Clients import *
from CIF.CtrlCommands.Ping import *
from CIFRouter.MiniClient import *
from CIF.CtrlCommands.ThreadTracker import ThreadTracker
myname = "cif-router"
def dosubscribe(client, m):
client = m.src
if client in publishers :
print "dosubscribe: we've seen this client before. re-using old connection."
return control_pb2.ControlType.SUCCESS
elif clients.isregistered(client) == True:
if clients.apikey(client) == m.apikey:
print "dosubscribe: New publisher to connect to " + client
publishers[client] = time.time()
addr = m.iPublishRequest.ipaddress
port = m.iPublishRequest.port
print "dosubscribe: connect our xsub -> xpub on " + addr + ":" + str(port)
xsub.connect("tcp://" + addr + ":" + str(port))
return control_pb2.ControlType.SUCCESS
print "dosubscribe: iPublish from a registered client with a bad apikey: " + client + " " + m.apikey
print "dosubscribe: iPublish from a client who isnt registered: \"" + client + "\""
return control_pb2.ControlType.FAILED
def list_clients(client, apikey):
if clients.isregistered(client) == True and clients.apikey(client) == apikey:
return clients.asmessage()
return None
def make_register_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.REGISTER
msg.dst = msgfrom
msg.src = "cif-router"
print "mrr " + _apikey
msg.apikey = _apikey
return msg
def make_unregister_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.UNREGISTER
msg.dst = msgfrom
msg.src = "cif-router"
msg.apikey = _apikey
return msg
def make_msg_seq(msg):
_md5 = hashlib.md5()
_md5.update(msg.SerializeToString())
return _md5.digest()
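# Illustrative sketch (not part of the original router): roughly how a client
# such as cif-smrt could build the REGISTER command that is handled further
# below. The endpoint and apikey are hypothetical.
#
#   req = context.socket(zmq.REQ)
#   req.connect("tcp://127.0.0.1:5555")
#   msg = control_pb2.ControlType()
#   msg.version = msg.version                      # required field
#   msg.type = control_pb2.ControlType.COMMAND
#   msg.command = control_pb2.ControlType.REGISTER
#   msg.src = "cif-smrt"
#   msg.dst = "cif-router"
#   msg.apikey = "a8fd97c3-9f8b-477b-b45b-ba06719a0088"
#   req.send(msg.SerializeToString())
#   reply = control_pb2.ControlType()
#   reply.ParseFromString(req.recv())              # status: SUCCESS / FAILED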
def handle_miniclient_reply(socket, routerport, publisherport):
pending_registers = miniclient.pending_apikey_lookups()
print "pending_apikey_lookups: ", pending_registers
for apikey in pending_registers:
if apikey in register_wait_map:
reply_to = register_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_register_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
msg.status = control_pb2.ControlType.SUCCESS
clients.register(reply_to['msgfrom'], reply_to['from_zmqid'], apikey)
print " Register succeeded."
else:
print " Register failed: key expired"
else:
print " Register failed: key revoked"
else:
print " Register failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del register_wait_map[apikey]
elif apikey in unregister_wait_map:
reply_to = unregister_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_unregister_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.status = control_pb2.ControlType.SUCCESS
clients.unregister(reply_to['msgfrom'])
print " Unregister succeeded."
else:
print " Unregister failed: key expired"
else:
print " Unregister failed: key revoked"
else:
print " Unregister failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del unregister_wait_map[apikey]
miniclient.remove_pending_apikey(apikey)
def myrelay(pubport):
relaycount = 0
print "[myrelay] Create XPUB socket on " + str(pubport)
xpub = context.socket(zmq.PUB)
xpub.bind("tcp://*:" + str(pubport))
while True:
try:
relaycount = relaycount + 1
m = xsub.recv()
_m = msg_pb2.MessageType()
_m.ParseFromString(m)
if _m.type == msg_pb2.MessageType.QUERY:
mystats.setrelayed(1, 'QUERY')
elif _m.type == msg_pb2.MessageType.REPLY:
mystats.setrelayed(1, 'REPLY')
elif _m.type == msg_pb2.MessageType.SUBMISSION:
mystats.setrelayed(1, 'SUBMISSION')
for bmt in _m.submissionRequest:
mystats.setrelayed(1, bmt.baseObjectType)
print "[myrelay] total:%d got:%d bytes" % (relaycount, len(m))
#print "[myrelay] got msg on our xsub socket: " , m
xpub.send(m)
except Exception as e:
print "[myrelay] invalid message received: ", e
def usage():
print "cif-router [-r routerport] [-p pubport] [-m myid] [-a myapikey] [-dn dbname] [-dk dbkey] [-h]"
print " routerport = 5555, pubport = 5556, myid = cif-router"
print " dbkey = a8fd97c3-9f8b-477b-b45b-ba06719a0088"
print " dbname = cif-db"
try:
    opts, args = getopt.getopt(sys.argv[1:], 'p:r:m:a:h')
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
global mystats
global clients
global thread_tracker
context = zmq.Context()
clients = Clients()
mystats = RouterStats()
publishers = {}
routerport = 5555
publisherport = 5556
myid = "cif-router"
dbkey = 'a8fd97c3-9f8b-477b-b45b-ba06719a0088'
dbname = 'cif-db'
global apikey
apikey = 'a1fd11c1-1f1b-477b-b45b-ba06719a0088'
miniclient = None
miniclient_id = myid + "-miniclient"
register_wait_map = {}
unregister_wait_map = {}
for o, a in opts:
if o == "-r":
routerport = a
elif o == "-p":
publisherport = a
elif o == "-m":
myid = a
elif o == "-dk":
dbkey = a
elif o == "-dn":
dbname = a
elif o == "-a":
apikey = a
elif o == "-h":
usage()
sys.exit(2)
print "Create ROUTER socket on " + str(routerport)
global socket
socket = context.socket(zmq.ROUTER)
socket.bind("tcp://*:" + str(routerport))
socket.setsockopt(zmq.IDENTITY, myname)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
print "Create XSUB socket"
xsub = context.socket(zmq.SUB)
xsub.setsockopt(zmq.SUBSCRIBE, '')
print "Connect XSUB<->XPUB"
thread = threading.Thread(target=myrelay, args=(publisherport,))
thread.start()
while not thread.isAlive():
print "waiting for pubsub relay thread to become alive"
time.sleep(1)
thread_tracker = ThreadTracker(False)
thread_tracker.add(id=thread.ident, user='Router', host='localhost', state='Running', info="PUBSUB Relay")
print "Entering event loop"
try:
open_for_business = False
while True:
sockets_with_data_ready = dict(poller.poll(1000))
#print "[up " + str(int(mystats.getuptime())) + "s]: Wakeup: "
if miniclient != None:
if miniclient.pending() == True:
print "\tMiniclient has replies we need to handle."
handle_miniclient_reply(socket, routerport, publisherport)
if sockets_with_data_ready and sockets_with_data_ready.get(socket) == zmq.POLLIN:
print "[up " + str(int(mystats.getuptime())) + "s]: Got an inbound message"
rawmsg = socket.recv_multipart()
#print " Got ", rawmsg
msg = control_pb2.ControlType()
try:
msg.ParseFromString(rawmsg[2])
except Exception as e:
print "Received message isn't a protobuf: ", e
mystats.setbad()
else:
from_zmqid = rawmsg[0] # save the ZMQ identity of who sent us this message
#print "Got msg: "#, msg.seq
try:
cifsupport.versionCheck(msg)
except Exception as e:
print "\tReceived message has incompatible version: ", e
mystats.setbadversion(1, msg.version)
else:
if cifsupport.isControl(msg):
msgfrom = msg.src
msgto = msg.dst
msgcommand = msg.command
msgcommandtext = control_pb2._CONTROLTYPE_COMMANDTYPE.values_by_number[msg.command].name
msgid = msg.seq
if msgfrom != '' and msg.apikey != '':
if msgto == myname and msg.type == control_pb2.ControlType.REPLY:
print "\tREPLY for me: ", msgcommand
if msgcommand == control_pb2.ControlType.APIKEY_GET:
print "\tReceived a REPLY for an APIKEY_GET"
elif msgto == myname and msg.type == control_pb2.ControlType.COMMAND:
print "\tCOMMAND for me: ", msgcommandtext
mystats.setcontrols(1, msgcommandtext)
"""
For REGISTER:
We allow only the db to register with us while we are not
open_for_business. Once the DB registers, we are open_for_business
since we can then start validating apikeys. Until that time, we can
only validate the dbkey that is specified on the command line when
you launch this program.
"""
if msgcommand == control_pb2.ControlType.REGISTER:
print "\tREGISTER from: " + msgfrom
msg.status = control_pb2.ControlType.FAILED
msg.type = control_pb2.ControlType.REPLY
msg.seq = msgid
if msgfrom == miniclient_id and msg.apikey == apikey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
print "\tMiniClient has registered."
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif msgfrom == dbname and msg.apikey == dbkey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
open_for_business = True
print "\tDB has connected successfully. Sending reply to DB."
print "\tStarting embedded client"
miniclient = MiniClient(apikey, "127.0.0.1", "127.0.0.1:" + str(routerport), 5557, miniclient_id, thread_tracker, True)
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif open_for_business == True:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
register_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
else:
print "\tNot open_for_business yet. Go away."
elif msgcommand == control_pb2.ControlType.UNREGISTER:
"""
If the database unregisters, then we are not open_for_business any more.
"""
print "\tUNREGISTER from: " + msgfrom
if open_for_business == True:
if msgfrom == dbname and msg.apikey == dbkey:
print "\t\tDB unregistered. Closing for business."
open_for_business = False
clients.unregister(msgfrom)
msg.status = control_pb2.ControlType.SUCCESS
msg.seq = msgid
socket.send_multipart([ from_zmqid, '', msg.SerializeToString()])
else:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to the client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
unregister_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
elif msgcommand == control_pb2.ControlType.LISTCLIENTS:
print "\tLIST-CLIENTS for: " + msgfrom
if open_for_business == True:
rv = list_clients(msg.src, msg.apikey)
msg.seq = msgid
msg.status = msg.status | control_pb2.ControlType.FAILED
if rv != None:
msg.status = msg.status | control_pb2.ControlType.SUCCESS
msg.listClientsResponse.client.extend(rv.client)
msg.listClientsResponse.connectTimestamp.extend(rv.connectTimestamp)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.STATS:
print "\tSTATS for: " + msgfrom
if open_for_business == True:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
msg.statsResponse.statsType = control_pb2.StatsResponse.ROUTER
msg.statsResponse.stats = mystats.asjson()
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.THREADS_LIST:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
thread_tracker.asmessage(msg.listThreadsResponse)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
if msg.command == control_pb2.ControlType.PING:
c = Ping.makereply(msg)
socket.send_multipart( [ from_zmqid, '', c.SerializeToString() ] )
elif msgcommand == control_pb2.ControlType.IPUBLISH:
print "\tIPUBLISH from: " + msgfrom
if open_for_business == True:
rv = dosubscribe(from_zmqid, msg)
msg.status = rv
socket.send_multipart( [from_zmqid, '', msg.SerializeToString()] )
else:
print "\tCOMMAND for someone else: cmd=", msgcommandtext, "src=", msgfrom, " dst=", msgto
msgto_zmqid = clients.getzmqidentity(msgto)
if msgto_zmqid != None:
socket.send_multipart([msgto_zmqid, '', msg.SerializeToString()])
else:
print "\tUnknown message destination: ", msgto
else:
print "\tmsgfrom and/or msg.apikey is empty"
except KeyboardInterrupt:
print "Shut down."
if thread.isAlive():
try:
thread._Thread__stop()
except:
print(str(thread.getName()) + ' could not be terminated')
sys.exit(0)
| bsd-3-clause | -4,783,758,994,462,898,000 | 44.230932 | 161 | 0.492154 | false |
EmilianStankov/Viridis-Media-Player | source/playlist_tests.py | 1 | 1257 | import unittest
from playlist import Playlist, load_playlist_from_db
class TestPlaylist(unittest.TestCase):
"""Playlist tests"""
def setUp(self):
self.pl = Playlist("playlist", ["song_one", "song_two"])
self.pl.save_to_db()
def tearDown(self):
self.pl.delete_from_db()
def test_get_playlist_name(self):
self.assertEqual(self.pl.get_name(), "playlist")
def test_get_playlist_files(self):
self.assertEqual(self.pl.get_files(), ["song_one", "song_two"])
def test_add_new_file_to_playlist(self):
self.pl.add_file("song_three")
self.assertEqual(self.pl.get_files(),
["song_one", "song_two", "song_three"])
def test_remove_file_from_playlist(self):
self.pl.remove_file("song_one")
self.assertEqual(self.pl.get_files(), ["song_two"])
def test_remove_file_that_is_not_in_playlist(self):
        self.assertRaises(ValueError, self.pl.remove_file, "song_three")
def test_load_playlist_from_database(self):
pl2 = load_playlist_from_db("playlist")
self.assertEqual(pl2.get_name(), "playlist")
self.assertEqual(pl2.get_files(), ["song_one", "song_two"])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,823,412,644,465,673,000 | 30.425 | 72 | 0.618934 | false |
fdouetteau/PyBabe | pybabe/pivot.py | 1 | 2935 |
try:
from collections import OrderedDict
except:
## 2.6 Fallback
from ordereddict import OrderedDict
from base import StreamHeader, StreamFooter, BabeBase
class OrderedDefaultdict(OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
        args = (self.default_factory,) if self.default_factory else tuple()
return type(self), args, None, None, self.items()
class OrderedSet(set):
def __init__(self):
self.list = []
def add(self, elt):
if elt in self:
return
else:
super(OrderedSet, self).add(elt)
self.list.append(elt)
def __iter__(self):
return self.list.__iter__()
def pivot(stream, pivot, group):
"Create a pivot around field, grouping on identical value for 'group'"
groups = OrderedDefaultdict(dict)
pivot_values = OrderedSet()
header = None
group_n = map(StreamHeader.keynormalize, group)
for row in stream:
if isinstance(row, StreamHeader):
header = row
elif isinstance(row, StreamFooter):
# HEADER IS : GROUP + (OTHER FIELDS * EACH VALUE
other_fields = [f for f in header.fields if not f in group and not f == pivot]
other_fields_k = map(StreamHeader.keynormalize, other_fields)
fields = group + [f + "-" + str(v)
for v in pivot_values.list for f in other_fields]
newheader = header.replace(fields=fields)
yield newheader
for _, row_dict in groups.iteritems():
## Create a line per group
mrow = row_dict.itervalues().next()
group_cols = [getattr(mrow, col) for col in group_n]
for v in pivot_values:
if v in row_dict:
mrow = row_dict[v]
group_cols.extend([getattr(mrow, col) for col in other_fields_k])
else:
group_cols.extend([None for col in other_fields])
yield group_cols
yield row
else:
kgroup = ""
for f in group_n:
kgroup = kgroup + str(getattr(row, f))
groups[kgroup][getattr(row, pivot)] = row
pivot_values.add(getattr(row, pivot))
BabeBase.register("pivot", pivot)
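# Illustrative sketch (not part of the original module); the stream contents
# and field names are hypothetical. Given rows with fields (country, year,
# sales), pivoted on "year" and grouped on ["country"]:
#
#   (country="fr", year=2010, sales=10)
#   (country="fr", year=2011, sales=12)
#   (country="de", year=2010, sales=7)
#
# the resulting header is (country, sales-2010, sales-2011) and the groups
# collapse to one row per country, e.g. ["fr", 10, 12] and ["de", 7, None].
# Through the registration above it is typically invoked via the fluent API,
# e.g. babe.pivot(pivot="year", group=["country"]) (assumed call style).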
| bsd-3-clause | -1,801,747,529,367,375,600 | 33.529412 | 90 | 0.560136 | false |
rbn42/stiler | config.py | 1 | 1027 | WinBorder = 2
LeftPadding = 15
BottomPadding = 15
TopPadding = BottomPadding
RightPadding = BottomPadding
NavigateAcrossWorkspaces = True  # available in Unity7
TempFile = "/dev/shm/.stiler_db"
LockFile = "/dev/shm/.stiler.lock"
# This is the configuration that works for Unity 7. If you are using a
# different desktop environment, close all windows and execute "wmctrl
# -lG" to find out all the applications that need to be excluded.
EXCLUDE_APPLICATIONS = ['<unknown>', 'x-nautilus-desktop', 'unity-launcher',
'unity-panel', 'Hud', 'unity-dash', 'Desktop',
'Docky',
'screenkey', 'XdndCollectionWindowImp']
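# Illustrative example (not part of the original config; the window details
# are made up): each "wmctrl -lG" output line ends with the window title, and
# that title is the value to add to EXCLUDE_APPLICATIONS, e.g.
#   0x03a00003  0 0 24 64 1056 N/A unity-launcher   ->  'unity-launcher'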
# An alternative method to exclude applications.
EXCLUDE_WM_CLASS = ['wesnoth-1.12']
UNRESIZABLE_APPLICATIONS = ['Screenkey']
RESIZE_STEP = 50
MOVE_STEP = 50
MIN_WINDOW_WIDTH = 50
MIN_WINDOW_HEIGHT = 50
#NOFRAME_WMCLASS = ['Wine']
# In i3-wm's window tree, only one child of a node is allowed to split.
#MAX_KD_TREE_BRANCH = 1
MAX_KD_TREE_BRANCH = 2
| mit | 8,967,949,853,643,365,000 | 31.09375 | 76 | 0.685492 | false |
ojii/sandlib | lib/lib_pypy/_ctypes/primitive.py | 1 | 11496 | import _ffi
import _rawffi
import weakref
import sys
SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?"
from _ctypes.basics import _CData, _CDataMeta, cdata_from_address,\
CArgObject
from _ctypes.builtin import ConvMode
from _ctypes.array import Array
from _ctypes.pointer import _Pointer, as_ffi_pointer
#from _ctypes.function import CFuncPtr # this import is moved at the bottom
# because else it's circular
class NULL(object):
pass
NULL = NULL()
TP_TO_DEFAULT = {
'c': 0,
'u': 0,
'b': 0,
'B': 0,
'h': 0,
'H': 0,
'i': 0,
'I': 0,
'l': 0,
'L': 0,
'q': 0,
'Q': 0,
'f': 0.0,
'd': 0.0,
'g': 0.0,
'P': None,
# not part of struct
'O': NULL,
'z': None,
'Z': None,
'?': False,
}
if sys.platform == 'win32':
TP_TO_DEFAULT['X'] = NULL
TP_TO_DEFAULT['v'] = 0
DEFAULT_VALUE = object()
class GlobalPyobjContainer(object):
def __init__(self):
self.objs = []
def add(self, obj):
num = len(self.objs)
self.objs.append(weakref.ref(obj))
return num
def get(self, num):
return self.objs[num]()
pyobj_container = GlobalPyobjContainer()
def generic_xxx_p_from_param(cls, value):
if value is None:
return cls(None)
if isinstance(value, basestring):
return cls(value)
if isinstance(value, _SimpleCData) and \
type(value)._type_ in 'zZP':
return value
return None # eventually raise
def from_param_char_p(cls, value):
"used by c_char_p and c_wchar_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, (Array, _Pointer)):
from ctypes import c_char, c_byte, c_wchar
if type(value)._type_ in [c_char, c_byte, c_wchar]:
return value
def from_param_void_p(cls, value):
"used by c_void_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, Array):
return value
if isinstance(value, (_Pointer, CFuncPtr)):
return cls.from_address(value._buffer.buffer)
if isinstance(value, (int, long)):
return cls(value)
FROM_PARAM_BY_TYPE = {
'z': from_param_char_p,
'Z': from_param_char_p,
'P': from_param_void_p,
}
class SimpleType(_CDataMeta):
def __new__(self, name, bases, dct):
try:
tp = dct['_type_']
except KeyError:
for base in bases:
if hasattr(base, '_type_'):
tp = base._type_
break
else:
raise AttributeError("cannot find _type_ attribute")
if (not isinstance(tp, str) or
not len(tp) == 1 or
tp not in SIMPLE_TYPE_CHARS):
raise ValueError('%s is not a type character' % (tp))
default = TP_TO_DEFAULT[tp]
ffiarray = _rawffi.Array(tp)
result = type.__new__(self, name, bases, dct)
result._ffiargshape = tp
result._ffishape = tp
result._fficompositesize = None
result._ffiarray = ffiarray
if tp == 'z':
# c_char_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.charp2string(addr)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode(ConvMode.encoding,
ConvMode.errors)
#self._objects = value
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.char)
elif tp == 'Z':
# c_wchar_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.wcharp2unicode(addr)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, str):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
#self._objects = value
array = _rawffi.Array('u')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.unichar)
elif tp == 'P':
# c_void_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
return addr
def _setvalue(self, value):
if isinstance(value, str):
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == 'u':
def _setvalue(self, val):
if isinstance(val, str):
val = val.decode(ConvMode.encoding, ConvMode.errors)
# possible if we use 'ignore'
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'c':
def _setvalue(self, val):
if isinstance(val, unicode):
val = val.encode(ConvMode.encoding, ConvMode.errors)
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'O':
def _setvalue(self, val):
num = pyobj_container.add(val)
self._buffer[0] = num
def _getvalue(self):
return pyobj_container.get(self._buffer[0])
result.value = property(_getvalue, _setvalue)
elif tp == 'X':
from ctypes import WinDLL
# Use WinDLL("oleaut32") instead of windll.oleaut32
# because the latter is a shared (cached) object; and
# other code may set their own restypes. We need out own
# restype here.
oleaut32 = WinDLL("oleaut32")
SysAllocStringLen = oleaut32.SysAllocStringLen
SysStringLen = oleaut32.SysStringLen
SysFreeString = oleaut32.SysFreeString
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
size = SysStringLen(addr)
return _rawffi.wcharp2rawunicode(addr, size)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, str):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
array = _rawffi.Array('u')(len(value)+1, value)
value = SysAllocStringLen(array.buffer, len(value))
elif value is None:
value = 0
if self._buffer[0]:
SysFreeString(self._buffer[0])
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == '?': # regular bool
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
self._buffer[0] = bool(value)
result.value = property(_getvalue, _setvalue)
elif tp == 'v': # VARIANT_BOOL type
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
if value:
self._buffer[0] = -1 # VARIANT_TRUE
else:
self._buffer[0] = 0 # VARIANT_FALSE
result.value = property(_getvalue, _setvalue)
# make pointer-types compatible with the _ffi fast path
if result._is_pointer_like():
def _as_ffi_pointer_(self, ffitype):
return as_ffi_pointer(self, ffitype)
result._as_ffi_pointer_ = _as_ffi_pointer_
return result
from_address = cdata_from_address
def from_param(self, value):
if isinstance(value, self):
return value
from_param_f = FROM_PARAM_BY_TYPE.get(self._type_)
if from_param_f:
res = from_param_f(self, value)
if res is not None:
return res
else:
try:
return self(value)
except (TypeError, ValueError):
pass
return super(SimpleType, self).from_param(value)
def _CData_output(self, resbuffer, base=None, index=-1):
output = super(SimpleType, self)._CData_output(resbuffer, base, index)
if self.__bases__[0] is _SimpleCData:
return output.value
return output
def _sizeofinstances(self):
return _rawffi.sizeof(self._type_)
def _alignmentofinstances(self):
return _rawffi.alignment(self._type_)
def _is_pointer_like(self):
return self._type_ in "sPzUZXO"
class _SimpleCData(_CData):
__metaclass__ = SimpleType
_type_ = 'i'
def __init__(self, value=DEFAULT_VALUE):
if not hasattr(self, '_buffer'):
self._buffer = self._ffiarray(1, autofree=True)
if value is not DEFAULT_VALUE:
self.value = value
def _ensure_objects(self):
if self._type_ not in 'zZP':
assert self._objects is None
return self._objects
def _getvalue(self):
return self._buffer[0]
def _setvalue(self, value):
self._buffer[0] = value
value = property(_getvalue, _setvalue)
del _getvalue, _setvalue
def __ctypes_from_outparam__(self):
meta = type(type(self))
if issubclass(meta, SimpleType) and meta != SimpleType:
return self
return self.value
def __repr__(self):
if type(self).__bases__[0] is _SimpleCData:
return "%s(%r)" % (type(self).__name__, self.value)
else:
return "<%s object at 0x%x>" % (type(self).__name__,
id(self))
def __nonzero__(self):
return self._buffer[0] not in (0, '\x00')
from _ctypes.function import CFuncPtr
| bsd-3-clause | 4,007,503,311,104,080,000 | 31.752137 | 78 | 0.501044 | false |
kubeflow/kubeflow | py/kubeflow/kubeflow/ci/common_ui_tests.py | 1 | 4563 | """"Argo Workflow for running frontend unit tests"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def _create_install_modules_task(self, task_template):
install = argo_build_util.deep_copy(task_template)
install["name"] = "npm-modules-install"
install["container"]["image"] = "node:12.20.1-stretch-slim"
install["container"]["command"] = ["npm"]
install["container"]["args"] = ["ci"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
install["container"]["workingDir"] = ui_dir
return install
def _create_ui_tests_task(self, task_template):
ui_tests = argo_build_util.deep_copy(task_template)
img = "browserless/chrome:1.44-chrome-stable"
ui_tests["name"] = "common-ui-tests"
ui_tests["container"]["image"] = img
ui_tests["container"]["command"] = ["npm"]
ui_tests["container"]["args"] = ["run", "test-ci"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
ui_tests["container"]["workingDir"] = ui_dir
return ui_tests
def _create_ui_build_task(self, task_template):
ui_build = argo_build_util.deep_copy(task_template)
ui_build["name"] = "build-common-ui-library"
ui_build["container"]["image"] = "node:12.20.1-stretch-slim"
ui_build["container"]["command"] = ["npm"]
ui_build["container"]["args"] = ["run", "build"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
ui_build["container"]["workingDir"] = ui_dir
return ui_build
def _create_exit_handler(self, task_template):
ui_build = argo_build_util.deep_copy(task_template)
ui_build["name"] = "rm-node-modules"
ui_build["container"]["image"] = "node:12.20.1-stretch-slim"
ui_build["container"]["command"] = ["rm"]
ui_build["container"]["args"] = ["-r", "node_modules"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
ui_build["container"]["workingDir"] = ui_dir
return ui_build
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow()
task_template = self.build_task_template()
# install npm modules
modules_install_task = self._create_install_modules_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
modules_install_task,
[self.mkdir_task_name])
# run common ui frontend tests
ui_tests_task = self._create_ui_tests_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
ui_tests_task,
[modules_install_task["name"]])
# build the node module from the lib source code
build_step = self._create_ui_build_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
build_step,
[modules_install_task["name"]])
# EXIT-HANDLER: remove node_modules folder as exit handler
rm_node_modules = self._create_exit_handler(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.EXIT_DAG_NAME,
rm_node_modules, [])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""Create workflow returns an Argo workflow to test kfctl upgrades.
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
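# --- Illustrative usage added for this excerpt; the argument values are placeholders. ---
# The kubeflow test infrastructure normally calls `create_workflow` for us; the guarded
# sketch below only shows the call shape and assumes a configured test-infra environment.
if __name__ == "__main__":
    import json

    wf = create_workflow(name="common-ui-tests-example",
                         namespace="kubeflow-test-infra",
                         bucket="example-artifacts-bucket")
    # The builder returns a plain dict describing the Argo Workflow resource.
    print(json.dumps(wf, indent=2, default=str))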
| apache-2.0 | 3,693,132,880,695,768,600 | 39.380531 | 79 | 0.586895 | false |
mongolab/mongoctl | mongoctl/tests/sharded_test.py | 1 | 2582 | # The MIT License
# Copyright (c) 2012 ObjectLabs Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
import time
from mongoctl.tests.test_base import MongoctlTestBase, append_user_arg
########################################################################################################################
# Servers
SHARD_TEST_SERVERS = [
"ConfigServer1",
"ConfigServer2",
"ConfigServer3",
"Mongos1",
"Mongos2",
"ShardServer1",
"ShardServer2",
"ShardServer3",
"ShardServer4",
"ShardServer5",
"ShardServer6",
"ShardArbiter"
]
########################################################################################################################
### Sharded Servers
class ShardedTest(MongoctlTestBase):
########################################################################################################################
def test_sharded(self):
# Start all sharded servers
for s_id in SHARD_TEST_SERVERS:
self.assert_start_server(s_id, start_options=["--rs-add"])
print "Sleeping for 10 seconds..."
        # sleep for 10 seconds
time.sleep(10)
conf_cmd = ["configure-shard-cluster", "ShardedCluster"]
append_user_arg(conf_cmd)
# Configure the sharded cluster
self.mongoctl_assert_cmd(conf_cmd)
###########################################################################
def get_my_test_servers(self):
return SHARD_TEST_SERVERS
# booty
if __name__ == '__main__':
unittest.main()
| mit | 1,538,437,245,596,689,700 | 33.891892 | 124 | 0.585593 | false |
jamasi/Xtal-xplore-R | gui/doublespinslider.py | 1 | 3682 | # -*- coding: utf-8 -*-
"""DoubleSpinSlider - a custom widget combining a slider with a spinbox
Copyright (C) 2014 Jan M. Simons <marten@xtal.rwth-aachen.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division, print_function, absolute_import
from decimal import Decimal
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot
class DoubleSpinSlider(QtGui.QWidget):
"""This is a QWidget containing a QSlider and a QDoubleSpinBox"""
def __init__(self, parent=None, width=50, height=100, dpi=100):
#super(DoubleSpinSlider, self).__init__(parent)
QtGui.QWidget.__init__(self, parent)
self._vLayout = QtGui.QVBoxLayout()
self._label = QtGui.QLabel(parent)
self._label.setAlignment(QtCore.Qt.AlignCenter)
self._vLayout.addWidget(self._label)
self._dSBox = QtGui.QDoubleSpinBox(parent)
self._dSBox.setWrapping(True)
self._dSBox.setDecimals(4)
self._dSBox.setMaximum(1.00000000)
self._dSBox.setSingleStep(0.1000000000)
self._vLayout.addWidget(self._dSBox)
self._hLayout = QtGui.QHBoxLayout()
self._vSlider = QtGui.QSlider(parent)
self._vSlider.setMinimum(0)
self._vSlider.setMaximum(10000)
self._vSlider.setPageStep(1000)
self._vSlider.setOrientation(QtCore.Qt.Vertical)
self._vSlider.setTickPosition(QtGui.QSlider.TicksBothSides)
self._vSlider.setTickInterval(0)
self._hLayout.addWidget(self._vSlider)
self._vLayout.addLayout(self._hLayout)
self.setLayout(self._vLayout)
self.setParent(parent)
# map functions
self.setText = self._label.setText
self.text = self._label.text
self.setValue = self._dSBox.setValue
self.value = self._dSBox.value
self._vSlider.valueChanged.connect(self.ChangeSpinBox)
self._dSBox.valueChanged.connect(self.ChangeSlider)
def _multiplier(self):
return 10.000000 ** self._dSBox.decimals()
@pyqtSlot(int)
def ChangeSpinBox(self, slidervalue):
#print("sv: {}".format(slidervalue))
        newvalue = round(slidervalue / self._multiplier(), 4)
#print("nv: {}".format(newvalue))
if newvalue != self._dSBox.value():
self._dSBox.setValue(newvalue)
@pyqtSlot('double')
def ChangeSlider(self, spinboxvalue):
newvalue = spinboxvalue * self._multiplier()
#print("sb: {sb} mult: {mult} prod: {prod}".format(
# sb=spinboxvalue,
# mult=int(10.00000000 ** self._dSBox.decimals()),
# prod=newvalue))
self._vSlider.setValue(newvalue)
@pyqtSlot('double')
def setMaximum(self, maximum):
self._dSBox.setMaximum(maximum)
self._vSlider.setMaximum(maximum * self._multiplier())
@pyqtSlot('double')
def setMinimum(self, minimum):
self._dSBox.setMinimum(minimum)
self._vSlider.setMinimum(minimum * self._multiplier())
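# --- Illustrative usage added for this excerpt; not part of the original widget. ---
# DoubleSpinSlider exposes setText/setMinimum/setMaximum/setValue/value like a normal
# input widget, while keeping its spin box and slider in sync through the slots above.
if __name__ == "__main__":
    import sys

    app = QtGui.QApplication(sys.argv)
    window = QtGui.QWidget()
    layout = QtGui.QHBoxLayout(window)

    occupancy = DoubleSpinSlider(window)
    occupancy.setText("occupancy")
    occupancy.setMinimum(0.0)
    occupancy.setMaximum(1.0)
    occupancy.setValue(0.5)
    layout.addWidget(occupancy)

    window.show()
    sys.exit(app.exec_())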
| agpl-3.0 | 7,329,879,116,559,789,000 | 38.591398 | 77 | 0.655894 | false |
pepetreshere/odoo | addons/account/tests/test_reconciliation_matching_rules.py | 1 | 42618 | # -*- coding: utf-8 -*-
from freezegun import freeze_time
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests.common import Form
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestReconciliationMatchingRules(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
#################
# Company setup #
#################
cls.currency_data_2 = cls.setup_multi_currency_data({
'name': 'Dark Chocolate Coin',
'symbol': '🍫',
'currency_unit_label': 'Dark Choco',
'currency_subunit_label': 'Dark Cacao Powder',
}, rate2016=10.0, rate2017=20.0)
cls.company = cls.company_data['company']
cls.account_pay = cls.company_data['default_account_payable']
cls.current_assets_account = cls.env['account.account'].search([
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id),
('company_id', '=', cls.company.id)], limit=1)
cls.bank_journal = cls.env['account.journal'].search([('type', '=', 'bank'), ('company_id', '=', cls.company.id)], limit=1)
cls.cash_journal = cls.env['account.journal'].search([('type', '=', 'cash'), ('company_id', '=', cls.company.id)], limit=1)
cls.tax21 = cls.env['account.tax'].create({
'name': '21%',
'type_tax_use': 'purchase',
'amount': 21,
})
cls.tax12 = cls.env['account.tax'].create({
'name': '12%',
'type_tax_use': 'purchase',
'amount': 12,
})
cls.partner_1 = cls.env['res.partner'].create({'name': 'partner_1', 'company_id': cls.company.id})
cls.partner_2 = cls.env['res.partner'].create({'name': 'partner_2', 'company_id': cls.company.id})
cls.partner_3 = cls.env['res.partner'].create({'name': 'partner_3', 'company_id': cls.company.id})
###############
# Rules setup #
###############
cls.rule_1 = cls.env['account.reconcile.model'].create({
'name': 'Invoices Matching Rule',
'sequence': '1',
'rule_type': 'invoice_matching',
'auto_reconcile': False,
'match_nature': 'both',
'match_same_currency': True,
'match_total_amount': True,
'match_total_amount_param': 100,
'match_partner': True,
'match_partner_ids': [(6, 0, (cls.partner_1 + cls.partner_2 + cls.partner_3).ids)],
'company_id': cls.company.id,
'line_ids': [(0, 0, {'account_id': cls.current_assets_account.id})],
})
cls.rule_2 = cls.env['account.reconcile.model'].create({
'name': 'write-off model',
'rule_type': 'writeoff_suggestion',
'match_partner': True,
'match_partner_ids': [],
'line_ids': [(0, 0, {'account_id': cls.current_assets_account.id})],
})
##################
# Invoices setup #
##################
cls.invoice_line_1 = cls._create_invoice_line(100, cls.partner_1, 'out_invoice')
cls.invoice_line_2 = cls._create_invoice_line(200, cls.partner_1, 'out_invoice')
cls.invoice_line_3 = cls._create_invoice_line(300, cls.partner_1, 'in_refund', name="RBILL/2019/09/0013")
cls.invoice_line_4 = cls._create_invoice_line(1000, cls.partner_2, 'in_invoice')
cls.invoice_line_5 = cls._create_invoice_line(600, cls.partner_3, 'out_invoice')
cls.invoice_line_6 = cls._create_invoice_line(600, cls.partner_3, 'out_invoice', ref="RF12 3456")
cls.invoice_line_7 = cls._create_invoice_line(200, cls.partner_3, 'out_invoice', pay_reference="RF12 3456")
####################
# Statements setup #
####################
# TODO : account_number, partner_name, transaction_type, narration
invoice_number = cls.invoice_line_1.move_id.name
cls.bank_st, cls.bank_st_2, cls.cash_st = cls.env['account.bank.statement'].create([
{
'name': 'test bank journal',
'journal_id': cls.bank_journal.id,
'line_ids': [
(0, 0, {
'payment_ref': 'invoice %s-%s-%s' % tuple(invoice_number.split('/')[1:]),
'partner_id': cls.partner_1.id,
'amount': 100,
'sequence': 1,
}),
(0, 0, {
'payment_ref': 'xxxxx',
'partner_id': cls.partner_1.id,
'amount': 600,
'sequence': 2,
}),
],
}, {
'name': 'second test bank journal',
'journal_id': cls.bank_journal.id,
'line_ids': [
(0, 0, {
'payment_ref': 'nawak',
'narration': 'Communication: RF12 3456',
'partner_id': cls.partner_3.id,
'amount': 600,
'sequence': 1,
}),
(0, 0, {
'payment_ref': 'RF12 3456',
'partner_id': cls.partner_3.id,
'amount': 600,
'sequence': 2,
}),
(0, 0, {
'payment_ref': 'baaaaah',
'ref': 'RF12 3456',
'partner_id': cls.partner_3.id,
'amount': 600,
'sequence': 2,
}),
],
}, {
'name': 'test cash journal',
'journal_id': cls.cash_journal.id,
'line_ids': [
(0, 0, {
'payment_ref': 'yyyyy',
'partner_id': cls.partner_2.id,
'amount': -1000,
'sequence': 1,
}),
],
}
])
cls.bank_line_1, cls.bank_line_2 = cls.bank_st.line_ids
cls.bank_line_3, cls.bank_line_4, cls.bank_line_5 = cls.bank_st_2.line_ids
cls.cash_line_1 = cls.cash_st.line_ids
cls._post_statements(cls)
@classmethod
def _create_invoice_line(cls, amount, partner, type, currency=None, pay_reference=None, ref=None, name=None):
''' Create an invoice on the fly.'''
invoice_form = Form(cls.env['account.move'].with_context(default_move_type=type, default_invoice_date='2019-09-01', default_date='2019-09-01'))
invoice_form.partner_id = partner
if currency:
invoice_form.currency_id = currency
if pay_reference:
invoice_form.payment_reference = pay_reference
if ref:
invoice_form.ref = ref
if name:
invoice_form.name = name
with invoice_form.invoice_line_ids.new() as invoice_line_form:
invoice_line_form.name = 'xxxx'
invoice_line_form.quantity = 1
invoice_line_form.price_unit = amount
invoice_line_form.tax_ids.clear()
invoice = invoice_form.save()
invoice.action_post()
lines = invoice.line_ids
return lines.filtered(lambda l: l.account_id.user_type_id.type in ('receivable', 'payable'))
def _post_statements(self):
self.bank_st.balance_end_real = self.bank_st.balance_end
self.bank_st_2.balance_end_real = self.bank_st_2.balance_end
self.cash_st.balance_end_real = self.cash_st.balance_end
(self.bank_st + self.bank_st_2 + self.cash_st).button_post()
def _check_statement_matching(self, rules, expected_values, statements=None):
if statements is None:
statements = self.bank_st + self.cash_st
statement_lines = statements.mapped('line_ids').sorted()
matching_values = rules._apply_rules(statement_lines, None)
for st_line_id, values in matching_values.items():
values.pop('reconciled_lines', None)
values.pop('write_off_vals', None)
self.assertDictEqual(values, expected_values[st_line_id])
def test_matching_fields(self):
# Check without restriction.
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1,
'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
def test_matching_fields_match_text_location(self):
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = False
self.rule_1.match_text_location_note = False
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = False
self.rule_1.match_text_location_note = True
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = True
self.rule_1.match_text_location_note = False
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = True
self.rule_1.match_text_location_note = True
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = False
self.rule_1.match_text_location_reference = False
self.rule_1.match_text_location_note = False
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
def test_matching_fields_match_journal_ids(self):
self.rule_1.match_journal_ids |= self.cash_st.journal_id
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_journal_ids |= self.bank_st.journal_id + self.cash_st.journal_id
def test_matching_fields_match_nature(self):
self.rule_1.match_nature = 'amount_received'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': []},
})
self.rule_1.match_nature = 'amount_paid'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_nature = 'both'
def test_matching_fields_match_amount(self):
self.rule_1.match_amount = 'lower'
self.rule_1.match_amount_max = 150
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': []},
})
self.rule_1.match_amount = 'greater'
self.rule_1.match_amount_min = 200
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_1.id,
self.invoice_line_2.id,
self.invoice_line_3.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_amount = 'between'
self.rule_1.match_amount_min = 200
self.rule_1.match_amount_max = 800
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_1.id,
self.invoice_line_2.id,
self.invoice_line_3.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': []},
})
self.rule_1.match_amount = False
def test_matching_fields_match_label(self):
self.rule_1.match_label = 'contains'
self.rule_1.match_label_param = 'yyyyy'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_label = 'not_contains'
self.rule_1.match_label_param = 'xxxxx'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_label = 'match_regex'
self.rule_1.match_label_param = 'xxxxx|yyyyy'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_1.id,
self.invoice_line_2.id,
self.invoice_line_3.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_label = False
def test_matching_fields_match_total_amount(self):
# Check match_total_amount: line amount >= total residual amount.
self.rule_1.match_total_amount_param = 90.0
self.bank_line_1.amount += 5
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_total_amount_param = 100.0
self.bank_line_1.amount -= 5
# Check match_total_amount: line amount <= total residual amount.
self.rule_1.match_total_amount_param = 90.0
self.bank_line_1.amount -= 5
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_total_amount_param = 100.0
self.bank_line_1.amount += 5
def test_matching_fields_match_partner_category_ids(self):
test_category = self.env['res.partner.category'].create({'name': 'Consulting Services'})
self.partner_2.category_id = test_category
self.rule_1.match_partner_category_ids |= test_category
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_partner_category_ids = False
def test_mixin_rules(self):
''' Test usage of rules together.'''
# rule_1 is used before rule_2.
self.rule_1.sequence = 1
self.rule_2.sequence = 2
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
# rule_2 is used before rule_1.
self.rule_1.sequence = 2
self.rule_2.sequence = 1
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.cash_line_1.partner_id},
})
# rule_2 is used before rule_1 but only on partner_1.
self.rule_2.match_partner_ids |= self.partner_1
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
def test_auto_reconcile(self):
''' Test auto reconciliation.'''
self.bank_line_1.amount += 5
self.rule_1.sequence = 2
self.rule_1.auto_reconcile = True
self.rule_1.match_total_amount_param = 90
self.rule_2.sequence = 1
self.rule_2.match_partner_ids |= self.partner_2
self.rule_2.auto_reconcile = True
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'reconciled', 'partner': self.cash_line_1.partner_id},
})
# Check first line has been well reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 105.0, 'credit': 0.0},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 5.0},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0},
])
# Check second line has been well reconciled.
self.assertRecordValues(self.cash_line_1.line_ids, [
{'partner_id': self.partner_2.id, 'debit': 0.0, 'credit': 1000.0},
{'partner_id': self.partner_2.id, 'debit': 1000.0, 'credit': 0.0},
])
def test_larger_invoice_auto_reconcile(self):
''' Test auto reconciliation with an invoice with larger amount than the
statement line's, for rules without write-offs.'''
self.bank_line_1.amount = 40
self.invoice_line_1.move_id.payment_reference = self.bank_line_1.payment_ref
self.rule_1.sequence = 2
self.rule_1.auto_reconcile = True
self.rule_1.line_ids = [(5, 0, 0)]
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
# Check first line has been well reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 40.0, 'credit': 0.0},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 40.0},
])
self.assertEqual(self.invoice_line_1.amount_residual, 60.0, "The invoice should have been partially reconciled")
def test_auto_reconcile_with_tax(self):
''' Test auto reconciliation with a tax amount included in the bank statement line'''
self.rule_1.write({
'auto_reconcile': True,
'rule_type': 'writeoff_suggestion',
'line_ids': [(1, self.rule_1.line_ids.id, {
'amount': 50,
'force_tax_included': True,
'tax_ids': [(6, 0, self.tax21.ids)],
}), (0, 0, {
'amount': 100,
'force_tax_included': False,
'tax_ids': [(6, 0, self.tax12.ids)],
'account_id': self.current_assets_account.id,
})]
})
self.bank_line_1.amount = -121
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_2.partner_id},
}, statements=self.bank_st)
# Check first line has been well reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 121.0, 'tax_ids': [], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 7.26, 'tax_ids': [], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 50.0, 'credit': 0.0, 'tax_ids': [self.tax21.id], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 10.5, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.tax21.id},
{'partner_id': self.partner_1.id, 'debit': 60.5, 'credit': 0.0, 'tax_ids': [self.tax12.id], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 7.26, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.tax12.id},
])
def test_reverted_move_matching(self):
partner = self.partner_1
AccountMove = self.env['account.move']
move = AccountMove.create({
'journal_id': self.bank_journal.id,
'line_ids': [
(0, 0, {
'account_id': self.account_pay.id,
'partner_id': partner.id,
'name': 'One of these days',
'debit': 10,
}),
(0, 0, {
'account_id': self.bank_journal.payment_credit_account_id.id,
'partner_id': partner.id,
'name': 'I\'m gonna cut you into little pieces',
'credit': 10,
})
],
})
payment_bnk_line = move.line_ids.filtered(lambda l: l.account_id == self.bank_journal.payment_credit_account_id)
move.action_post()
move_reversed = move._reverse_moves()
self.assertTrue(move_reversed.exists())
self.bank_line_1.write({
'payment_ref': '8',
'partner_id': partner.id,
'amount': -10,
})
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [payment_bnk_line.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
def test_match_different_currencies(self):
partner = self.env['res.partner'].create({'name': 'Bernard Gagnant'})
self.rule_1.write({'match_partner_ids': [(6, 0, partner.ids)], 'match_same_currency': False})
currency_inv = self.env.ref('base.EUR')
currency_statement = self.env.ref('base.JPY')
currency_statement.active = True
invoice_line = self._create_invoice_line(100, partner, 'out_invoice', currency=currency_inv)
self.bank_line_1.write({'partner_id': partner.id, 'foreign_currency_id': currency_statement.id, 'amount_currency': 100, 'payment_ref': 'test'})
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': invoice_line.ids, 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
def test_invoice_matching_rule_no_partner(self):
""" Tests that a statement line without any partner can be matched to the
right invoice if they have the same payment reference.
"""
self.invoice_line_1.move_id.write({'payment_reference': 'Tournicoti66'})
self.bank_line_1.write({
'payment_ref': 'Tournicoti66',
'partner_id': None,
'amount': 95,
})
self.rule_1.write({
'line_ids': [(5, 0, 0)],
'match_partner': False,
'match_label': 'contains',
'match_label_param': 'Tournicoti', # So that we only match what we want to test
})
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
def test_inv_matching_rule_auto_rec_no_partner_with_writeoff(self):
self.invoice_line_1.move_id.write({'payment_reference': 'doudlidou355'})
self.bank_line_1.write({
'payment_ref': 'doudlidou355',
'partner_id': None,
'amount': 95,
})
self.rule_1.write({
'match_partner': False,
'match_label': 'contains',
'match_label_param': 'doudlidou', # So that we only match what we want to test
'match_total_amount_param': 90,
'auto_reconcile': True,
})
# Check bank reconciliation
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id, 'status': 'reconciled'},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
# Check invoice line has been fully reconciled, with a write-off.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 95.0, 'credit': 0.0, 'account_id': self.bank_journal.default_account_id.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 5.0, 'credit': 0.0, 'account_id': self.current_assets_account.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0, 'account_id': self.invoice_line_1.account_id.id, 'reconciled': True},
])
self.assertEqual(self.invoice_line_1.amount_residual, 0.0, "The invoice should have been fully reconciled")
def test_partner_mapping_rule(self):
self.bank_line_1.write({'partner_id': None, 'payment_ref': 'toto42', 'narration': None})
self.bank_line_2.write({'partner_id': None})
# Do the test for both rule 1 and 2, so that we check invoice matching and write-off rules
for rule in (self.rule_1 + self.rule_2):
# To cope for minor differences in rule results
matching_amls = rule.rule_type == 'invoice_matching' and self.invoice_line_1.ids or []
result_status = rule.rule_type == 'writeoff_suggestion' and {'status': 'write_off'} or {}
match_result = {**result_status, 'aml_ids': matching_amls, 'model': rule, 'partner': self.partner_1}
no_match_result = {'aml_ids': []}
# Without mapping, there should be no match
self._check_statement_matching(rule, {
self.bank_line_1.id: no_match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
# We add some mapping for payment reference to rule_1
rule.write({
'partner_mapping_line_ids': [(0, 0, {
'partner_id': self.partner_1.id,
'payment_ref_regex': 'toto.*',
})]
})
# bank_line_1 should now match
self._check_statement_matching(rule, {
self.bank_line_1.id: match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
# If we now add a narration regex to the same mapping line, nothing should match
rule.partner_mapping_line_ids.write({'narration_regex': ".*coincoin"})
self.bank_line_1.write({'narration': None}) # Reset from possible previous iteration
self._check_statement_matching(rule, {
self.bank_line_1.id: no_match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
# If we set the narration so that it matches the new mapping criterium, line_1 matches
self.bank_line_1.write({'narration': "42coincoin"})
self._check_statement_matching(rule, {
self.bank_line_1.id: match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
def test_partner_name_in_communication(self):
self.invoice_line_1.partner_id.write({'name': "Archibald Haddock"})
self.bank_line_1.write({'partner_id': None, 'payment_ref': '1234//HADDOCK-Archibald'})
self.bank_line_2.write({'partner_id': None})
self.rule_1.write({'match_partner': False})
# bank_line_1 should match, as its communication contains the invoice's partner name
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
def test_partner_name_with_regexp_chars(self):
self.invoice_line_1.partner_id.write({'name': "Archibald + Haddock"})
self.bank_line_1.write({'partner_id': None, 'payment_ref': '1234//HADDOCK+Archibald'})
self.bank_line_2.write({'partner_id': None})
self.rule_1.write({'match_partner': False})
# The query should still work
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
def test_match_multi_currencies(self):
''' Ensure the matching of candidates is made using the right statement line currency.
In this test, the value of the statement line is 100 USD = 300 GOL = 900 DAR and we want to match two journal
items of:
- 100 USD = 200 GOL (= 600 DAR from the statement line point of view)
- 14 USD = 280 DAR
        Both journal items should be suggested to the user because they represent 98% of the statement line amount
(DAR).
'''
partner = self.env['res.partner'].create({'name': 'Bernard Perdant'})
journal = self.env['account.journal'].create({
'name': 'test_match_multi_currencies',
'code': 'xxxx',
'type': 'bank',
'currency_id': self.currency_data['currency'].id,
})
matching_rule = self.env['account.reconcile.model'].create({
'name': 'test_match_multi_currencies',
'rule_type': 'invoice_matching',
'match_partner': True,
'match_partner_ids': [(6, 0, partner.ids)],
'match_total_amount': True,
'match_total_amount_param': 95.0,
'match_same_currency': False,
'company_id': self.company_data['company'].id,
})
statement = self.env['account.bank.statement'].create({
'name': 'test_match_multi_currencies',
'journal_id': journal.id,
'line_ids': [
(0, 0, {
'journal_id': journal.id,
'date': '2016-01-01',
'payment_ref': 'line',
'partner_id': partner.id,
'foreign_currency_id': self.currency_data_2['currency'].id,
'amount': 300.0, # Rate is 3 GOL = 1 USD in 2016.
'amount_currency': 900.0, # Rate is 10 DAR = 1 USD in 2016 but the rate used by the bank is 9:1.
}),
],
})
statement_line = statement.line_ids
statement.button_post()
move = self.env['account.move'].create({
'move_type': 'entry',
'date': '2017-01-01',
'journal_id': self.company_data['default_journal_sale'].id,
'line_ids': [
# Rate is 2 GOL = 1 USD in 2017.
# The statement line will consider this line equivalent to 600 DAR.
(0, 0, {
'account_id': self.company_data['default_account_receivable'].id,
'partner_id': partner.id,
'currency_id': self.currency_data['currency'].id,
'debit': 100.0,
'credit': 0.0,
'amount_currency': 200.0,
}),
# Rate is 20 GOL = 1 USD in 2017.
(0, 0, {
'account_id': self.company_data['default_account_receivable'].id,
'partner_id': partner.id,
'currency_id': self.currency_data_2['currency'].id,
'debit': 14.0,
'credit': 0.0,
'amount_currency': 280.0,
}),
# Line to balance the journal entry:
(0, 0, {
'account_id': self.company_data['default_account_revenue'].id,
'debit': 0.0,
'credit': 114.0,
}),
],
})
move.action_post()
move_line_1 = move.line_ids.filtered(lambda line: line.debit == 100.0)
move_line_2 = move.line_ids.filtered(lambda line: line.debit == 14.0)
with freeze_time('2017-01-01'):
self._check_statement_matching(matching_rule, {
statement_line.id: {'aml_ids': (move_line_1 + move_line_2).ids, 'model': matching_rule, 'partner': statement_line.partner_id}
}, statements=statement)
def test_inv_matching_with_write_off(self):
self.rule_1.match_total_amount_param = 90
self.bank_st.line_ids[1].unlink() # We don't need this one here
statement_line = self.bank_st.line_ids[0]
statement_line.write({
'payment_ref': self.invoice_line_1.move_id.payment_reference,
'amount': 90,
})
# Test the invoice-matching part
self._check_statement_matching(self.rule_1, {
statement_line.id: {'aml_ids': self.invoice_line_1.ids, 'model': self.rule_1, 'partner': self.invoice_line_1.partner_id, 'status': 'write_off'},
}, self.bank_st)
# Test the write-off part
expected_write_off = {
'balance': 10,
'currency_id': False,
'reconcile_model_id': self.rule_1.id,
'account_id': self.current_assets_account.id,
}
matching_result = self.rule_1._apply_rules(statement_line)
self.assertEqual(len(matching_result[statement_line.id].get('write_off_vals', [])), 1, "Exactly one write-off line should be proposed.")
full_write_off_dict = matching_result[statement_line.id]['write_off_vals'][0]
to_compare = {
key: full_write_off_dict[key]
for key in expected_write_off.keys()
}
self.assertDictEqual(expected_write_off, to_compare)
def test_inv_matching_with_write_off_autoreconcile(self):
self.bank_line_1.amount = 95
self.rule_1.sequence = 2
self.rule_1.auto_reconcile = True
self.rule_1.match_total_amount_param = 90
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
# Check first line has been properly reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 95.0, 'credit': 0.0, 'account_id': self.bank_journal.default_account_id.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 5.0, 'credit': 0.0, 'account_id': self.current_assets_account.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0, 'account_id': self.invoice_line_1.account_id.id, 'reconciled': True},
])
self.assertEqual(self.invoice_line_1.amount_residual, 0.0, "The invoice should have been fully reconciled")
def test_avoid_amount_matching_bypass(self):
""" By the default, if the label of statement lines exactly matches a payment reference, it bypasses any kind of amount verification.
This is annoying in some setups, so a config parameter was introduced to handle that.
"""
self.env['ir.config_parameter'].set_param('account.disable_rec_models_bypass', '1')
self.rule_1.match_total_amount_param = 90
second_inv_matching_rule = self.env['account.reconcile.model'].create({
'name': 'Invoices Matching Rule',
'sequence': 2,
'rule_type': 'invoice_matching',
'auto_reconcile': False,
'match_nature': 'both',
'match_same_currency': False,
'match_total_amount': False,
'match_partner': True,
'company_id': self.company.id,
})
self.bank_line_1.write({
'payment_ref': self.invoice_line_1.move_id.payment_reference,
'amount': 99,
})
self.bank_line_2.write({
'payment_ref': self.invoice_line_2.move_id.payment_reference,
'amount': 1,
})
self._check_statement_matching(self.rule_1 + second_inv_matching_rule, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [self.invoice_line_2.id], 'model': second_inv_matching_rule, 'partner': self.bank_line_2.partner_id}
}, statements=self.bank_st)
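# --- Illustrative note added for this excerpt; not part of the original test suite. ---
# Every test above funnels through `_check_statement_matching`, which calls
# `rules._apply_rules(statement_lines, None)` and compares the result with an expected
# dict keyed by statement-line id.  Schematically, one entry of that dict looks like:
#
#     {
#         st_line.id: {
#             'aml_ids': [invoice_line.id, ...],   # candidate journal items
#             'model': reconcile_model,            # rule that produced the match
#             'partner': st_line.partner_id,       # partner inferred for the line
#             'status': 'write_off',               # optional: 'write_off' or 'reconciled'
#             # 'write_off_vals' / 'reconciled_lines' are popped before the comparison
#         },
#     }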
| agpl-3.0 | 4,953,351,938,277,983,000 | 47.982759 | 157 | 0.55464 | false |
denisenkom/django-sqlserver | tests/pagination/tests.py | 1 | 15383 | from __future__ import unicode_literals
import unittest
import warnings
from datetime import datetime
import django
from django.core.paginator import (
EmptyPage, InvalidPage, PageNotAnInteger, Paginator,
)
if django.VERSION >= (1, 11, 0):
from django.core.paginator import UnorderedObjectListWarning
from django.test import TestCase
from django.utils import six
from .custom import ValidAdjacentNumsPaginator
from .models import Article
class PaginationTests(unittest.TestCase):
"""
Tests for the Paginator and Page classes.
"""
def check_paginator(self, params, output):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that its attributes match the passed output.
"""
count, num_pages, page_range = output
paginator = Paginator(*params)
self.check_attribute('count', paginator, count, params)
self.check_attribute('num_pages', paginator, num_pages, params)
self.check_attribute('page_range', paginator, page_range, params, coerce=list)
def check_attribute(self, name, paginator, expected, params, coerce=None):
"""
Helper method that checks a single attribute and gives a nice error
message upon test failure.
"""
got = getattr(paginator, name)
if coerce is not None:
got = coerce(got)
self.assertEqual(
expected, got,
"For '%s', expected %s but got %s. Paginator parameters were: %s"
% (name, expected, got, params)
)
def test_paginator(self):
"""
Tests the paginator attributes using varying inputs.
"""
nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
ten = nine + [10]
eleven = ten + [11]
tests = (
# Each item is two tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is resulting Paginator attributes - count,
# num_pages, and page_range.
# Ten items, varying orphans, no empty first page.
((ten, 4, 0, False), (10, 3, [1, 2, 3])),
((ten, 4, 1, False), (10, 3, [1, 2, 3])),
((ten, 4, 2, False), (10, 2, [1, 2])),
((ten, 4, 5, False), (10, 2, [1, 2])),
((ten, 4, 6, False), (10, 1, [1])),
# Ten items, varying orphans, allow empty first page.
((ten, 4, 0, True), (10, 3, [1, 2, 3])),
((ten, 4, 1, True), (10, 3, [1, 2, 3])),
((ten, 4, 2, True), (10, 2, [1, 2])),
((ten, 4, 5, True), (10, 2, [1, 2])),
((ten, 4, 6, True), (10, 1, [1])),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1, [1])),
(([1], 4, 1, False), (1, 1, [1])),
(([1], 4, 2, False), (1, 1, [1])),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1, [1])),
(([1], 4, 1, True), (1, 1, [1])),
(([1], 4, 2, True), (1, 1, [1])),
# Zero items, varying orphans, no empty first page.
(([], 4, 0, False), (0, 0, [])),
(([], 4, 1, False), (0, 0, [])),
(([], 4, 2, False), (0, 0, [])),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 1, [1])),
(([], 4, 1, True), (0, 1, [1])),
(([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
(([], 1, 0, True), (0, 1, [1])),
(([], 1, 0, False), (0, 0, [])),
(([1], 2, 0, True), (1, 1, [1])),
((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
(([1], 1, 0, True), (1, 1, [1])),
(([1, 2], 2, 0, True), (2, 1, [1])),
((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
(([1, 2], 1, 0, True), (2, 2, [1, 2])),
(([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
(([1, 2], 1, 1, True), (2, 1, [1])),
(([1, 2, 3], 2, 1, True), (3, 1, [1])),
((eleven, 10, 1, True), (11, 1, [1])),
# Non-integer inputs
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
)
for params, output in tests:
self.check_paginator(params, output)
def test_invalid_page_number(self):
"""
Invalid page numbers result in the correct exception being raised.
"""
paginator = Paginator([1, 2, 3], 2)
with self.assertRaises(InvalidPage):
paginator.page(3)
with self.assertRaises(PageNotAnInteger):
paginator.validate_number(None)
with self.assertRaises(PageNotAnInteger):
paginator.validate_number('x')
# With no content and allow_empty_first_page=True, 1 is a valid page number
paginator = Paginator([], 2)
self.assertEqual(paginator.validate_number(1), 1)
def test_paginate_misc_classes(self):
class CountContainer(object):
def count(self):
return 42
# Paginator can be passed other objects with a count() method.
paginator = Paginator(CountContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
# Paginator can be passed other objects that implement __len__.
class LenContainer(object):
def __len__(self):
return 42
paginator = Paginator(LenContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
def check_indexes(self, params, page_num, indexes):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that the start and end indexes of the passed
page_num match those given as a 2-tuple in indexes.
"""
paginator = Paginator(*params)
if page_num == 'first':
page_num = 1
elif page_num == 'last':
page_num = paginator.num_pages
page = paginator.page(page_num)
start, end = indexes
msg = ("For %s of page %s, expected %s but got %s. Paginator parameters were: %s")
self.assertEqual(start, page.start_index(), msg % ('start index', page_num, start, page.start_index(), params))
self.assertEqual(end, page.end_index(), msg % ('end index', page_num, end, page.end_index(), params))
def test_page_indexes(self):
"""
Paginator pages have the correct start and end indexes.
"""
ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tests = (
# Each item is three tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is the start and end indexes of the first page.
# Third tuple is the start and end indexes of the last page.
# Ten items, varying per_page, no orphans.
((ten, 1, 0, True), (1, 1), (10, 10)),
((ten, 2, 0, True), (1, 2), (9, 10)),
((ten, 3, 0, True), (1, 3), (10, 10)),
((ten, 5, 0, True), (1, 5), (6, 10)),
# Ten items, varying per_page, with orphans.
((ten, 1, 1, True), (1, 1), (9, 10)),
((ten, 1, 2, True), (1, 1), (8, 10)),
((ten, 3, 1, True), (1, 3), (7, 10)),
((ten, 3, 2, True), (1, 3), (7, 10)),
((ten, 3, 4, True), (1, 3), (4, 10)),
((ten, 5, 1, True), (1, 5), (6, 10)),
((ten, 5, 2, True), (1, 5), (6, 10)),
((ten, 5, 5, True), (1, 10), (1, 10)),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1), (1, 1)),
(([1], 4, 1, False), (1, 1), (1, 1)),
(([1], 4, 2, False), (1, 1), (1, 1)),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1), (1, 1)),
(([1], 4, 1, True), (1, 1), (1, 1)),
(([1], 4, 2, True), (1, 1), (1, 1)),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 0), (0, 0)),
(([], 4, 1, True), (0, 0), (0, 0)),
(([], 4, 2, True), (0, 0), (0, 0)),
)
for params, first, last in tests:
self.check_indexes(params, 'first', first)
self.check_indexes(params, 'last', last)
# When no items and no empty first page, we should get EmptyPage error.
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 0, False), 1, None)
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 1, False), 1, None)
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 2, False), 1, None)
def test_page_sequence(self):
"""
A paginator page acts like a standard sequence.
"""
eleven = 'abcdefghijk'
page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
self.assertEqual(len(page2), 6)
self.assertIn('k', page2)
self.assertNotIn('a', page2)
self.assertEqual(''.join(page2), 'fghijk')
self.assertEqual(''.join(reversed(page2)), 'kjihgf')
def test_get_page_hook(self):
"""
A Paginator subclass can use the ``_get_page`` hook to
return an alternative to the standard Page class.
"""
eleven = 'abcdefghijk'
paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
page1 = paginator.page(1)
page2 = paginator.page(2)
self.assertIsNone(page1.previous_page_number())
self.assertEqual(page1.next_page_number(), 2)
self.assertEqual(page2.previous_page_number(), 1)
self.assertIsNone(page2.next_page_number())
def test_page_range_iterator(self):
"""
Paginator.page_range should be an iterator.
"""
self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(six.moves.range(0)))
class ModelPaginationTests(TestCase):
"""
Test pagination with Django model instances
"""
def setUp(self):
# Prepare a list of objects for pagination.
for x in range(1, 10):
a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
a.save()
def test_first_page(self):
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(1)
self.assertEqual("<Page 1 of 2>", six.text_type(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 1>",
"<Article: Article 2>",
"<Article: Article 3>",
"<Article: Article 4>",
"<Article: Article 5>"
])
self.assertTrue(p.has_next())
self.assertFalse(p.has_previous())
self.assertTrue(p.has_other_pages())
self.assertEqual(2, p.next_page_number())
with self.assertRaises(InvalidPage):
p.previous_page_number()
self.assertEqual(1, p.start_index())
self.assertEqual(5, p.end_index())
def test_last_page(self):
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(2)
self.assertEqual("<Page 2 of 2>", six.text_type(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 6>",
"<Article: Article 7>",
"<Article: Article 8>",
"<Article: Article 9>"
])
self.assertFalse(p.has_next())
self.assertTrue(p.has_previous())
self.assertTrue(p.has_other_pages())
with self.assertRaises(InvalidPage):
p.next_page_number()
self.assertEqual(1, p.previous_page_number())
self.assertEqual(6, p.start_index())
self.assertEqual(9, p.end_index())
def test_page_getitem(self):
"""
Tests proper behavior of a paginator page __getitem__ (queryset
evaluation, slicing, exception raised).
"""
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(1)
# Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
# (this happens from the template engine when using eg: {% page_obj.has_previous %})
self.assertIsNone(p.object_list._result_cache)
with self.assertRaises(TypeError):
p['has_previous']
self.assertIsNone(p.object_list._result_cache)
self.assertNotIsInstance(p.object_list, list)
# Make sure slicing the Page object with numbers and slice objects work.
self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
self.assertQuerysetEqual(p[slice(2)], [
"<Article: Article 1>",
"<Article: Article 2>",
]
)
# After __getitem__ is called, object_list is a list
self.assertIsInstance(p.object_list, list)
def test_paginating_unordered_queryset_raises_warning(self):
if django.VERSION < (1, 11, 0):
self.skipTest("does not work on older version of Django")
with warnings.catch_warnings(record=True) as warns:
# Prevent the RuntimeWarning subclass from appearing as an
# exception due to the warnings.simplefilter() in runtests.py.
warnings.filterwarnings('always', category=UnorderedObjectListWarning)
Paginator(Article.objects.all(), 5)
self.assertEqual(len(warns), 1)
warning = warns[0]
self.assertEqual(str(warning.message), (
"Pagination may yield inconsistent results with an unordered "
"object_list: <class 'pagination.models.Article'> QuerySet."
))
# The warning points at the Paginator caller (i.e. the stacklevel
# is appropriate).
self.assertEqual(warning.filename, __file__)
def test_paginating_unordered_object_list_raises_warning(self):
"""
        Unordered object list warning with an object that has an ordered
attribute but not a model attribute.
"""
if django.VERSION < (1, 11, 0):
self.skipTest("does not work on older version of Django")
class ObjectList():
ordered = False
object_list = ObjectList()
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always', category=UnorderedObjectListWarning)
Paginator(object_list, 5)
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), (
"Pagination may yield inconsistent results with an unordered "
"object_list: {!r}.".format(object_list)
))
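# --- Illustrative sketch added for this excerpt; not part of the original test module. ---
# The warning exercised above disappears once the queryset has an explicit ordering,
# which is the pattern the model tests in this class rely on.  Guarded so it never runs
# under the test runner; it assumes a configured Django project with this app installed.
if __name__ == "__main__":
    paginator = Paginator(Article.objects.order_by('id'), 5)
    first_page = paginator.page(1)
    print(paginator.count, paginator.num_pages, list(paginator.page_range))
    print(first_page.has_next(), first_page.start_index(), first_page.end_index())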
| mit | 6,795,741,275,359,204,000 | 41.494475 | 119 | 0.536176 | false |
adw0rd/lettuce-py3 | lettuce/__init__.py | 1 | 6767 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = version = '0.2.22'
release = 'kryptonite'
import os
import sys
import traceback
import warnings
try:
from imp import reload
except ImportError:
# python 2.5 fallback
pass
import random
from lettuce.core import Feature, TotalResult
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
from lettuce.decorators import step, steps
from lettuce.registry import call_hook
from lettuce.registry import STEP_REGISTRY
from lettuce.registry import CALLBACK_REGISTRY
from lettuce.exceptions import StepLoadingError
from lettuce.plugins import (
xunit_output,
subunit_output,
autopdb,
smtp_mail_queue,
)
from lettuce import fs
from lettuce import exceptions
try:
from colorama import init as ms_windows_workaround
ms_windows_workaround()
except ImportError:
pass
__all__ = [
'after',
'before',
'step',
'steps',
'world',
'STEP_REGISTRY',
'CALLBACK_REGISTRY',
'call_hook',
]
try:
terrain = fs.FileSystem._import("terrain")
reload(terrain)
except Exception as e:
if not "No module named 'terrain'" in str(e):
string = 'Lettuce has tried to load the conventional environment ' \
'module "terrain"\nbut it has errors, check its contents and ' \
'try to run lettuce again.\n\nOriginal traceback below:\n\n'
sys.stderr.write(string)
sys.stderr.write(exceptions.traceback.format_exc())
raise SystemExit(1)
class Runner(object):
""" Main lettuce's test runner
Takes a base path as parameter (string), so that it can look for
features and step definitions on there.
"""
def __init__(self, base_path, scenarios=None,
verbosity=0, no_color=False, random=False,
enable_xunit=False, xunit_filename=None,
enable_subunit=False, subunit_filename=None,
tags=None, failfast=False, auto_pdb=False,
smtp_queue=None, root_dir=None, **kwargs):
""" lettuce.Runner will try to find a terrain.py file and
import it from within `base_path`
"""
self.tags = tags
self.single_feature = None
if os.path.isfile(base_path) and os.path.exists(base_path):
self.single_feature = base_path
base_path = os.path.dirname(base_path)
sys.path.insert(0, base_path)
self.loader = fs.FeatureLoader(base_path, root_dir)
self.verbosity = verbosity
self.scenarios = scenarios and list(map(int, scenarios.split(","))) or None
self.failfast = failfast
if auto_pdb:
autopdb.enable(self)
sys.path.remove(base_path)
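        # Pick the output plugin that matches the requested verbosity level.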
if verbosity == 0:
from lettuce.plugins import non_verbose as output
elif verbosity == 1:
from lettuce.plugins import dots as output
elif verbosity == 2:
from lettuce.plugins import scenario_names as output
else:
if verbosity == 4:
from lettuce.plugins import colored_shell_output as output
msg = ('Deprecated in lettuce 2.2.21. Use verbosity 3 without '
'--no-color flag instead of verbosity 4')
warnings.warn(msg, DeprecationWarning)
elif verbosity == 3:
if no_color:
from lettuce.plugins import shell_output as output
else:
from lettuce.plugins import colored_shell_output as output
self.random = random
if enable_xunit:
xunit_output.enable(filename=xunit_filename)
if smtp_queue:
smtp_mail_queue.enable()
if enable_subunit:
subunit_output.enable(filename=subunit_filename)
reload(output)
self.output = output
def run(self):
""" Find and load step definitions, and them find and load
features under `base_path` specified on constructor
"""
results = []
if self.single_feature:
features_files = [self.single_feature]
else:
features_files = self.loader.find_feature_files()
if self.random:
random.shuffle(features_files)
if not features_files:
self.output.print_no_features_found(self.loader.base_dir)
return
# only load steps if we've located some features.
# this prevents stupid bugs when loading django modules
# that we don't even want to test.
try:
self.loader.find_and_load_step_definitions()
except StepLoadingError as e:
print("Error loading step definitions:\n", e)
return
call_hook('before', 'all')
failed = False
try:
for filename in features_files:
feature = Feature.from_file(filename)
results.append(
feature.run(self.scenarios,
tags=self.tags,
random=self.random,
failfast=self.failfast))
except exceptions.LettuceSyntaxError as e:
sys.stderr.write(e.msg)
failed = True
except exceptions.NoDefinitionFound as e:
sys.stderr.write(e.msg)
failed = True
except:
if not self.failfast:
e = sys.exc_info()[1]
print("Died with %s" % str(e))
traceback.print_exc()
else:
print()
print ("Lettuce aborted running any more tests "
"because was called with the `--failfast` option")
failed = True
finally:
total = TotalResult(results)
total.output_format()
call_hook('after', 'all', total)
if failed:
raise SystemExit(2)
return total
| gpl-3.0 | -6,675,595,172,369,562,000 | 30.469767 | 83 | 0.604936 | false |
sipwise/repoapi | repoapi/wsgi.py | 1 | 1088 | # Copyright (C) 2015 The Sipwise Team - http://sipwise.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""
WSGI config for repoapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "repoapi.settings.prod")
application = get_wsgi_application()
| gpl-3.0 | -8,167,005,503,863,965,000 | 35.266667 | 78 | 0.765625 | false |
sam-roth/Keypad | keypad/plugins/shell/bourne_model.py | 1 | 4068 | import subprocess
import shlex
from keypad.api import (Plugin,
register_plugin,
Filetype,
Cursor)
from keypad.abstract.code import IndentRetainingCodeModel, AbstractCompletionResults
from keypad.core.syntaxlib import SyntaxHighlighter, lazy
from keypad.core.processmgr.client import AsyncServerProxy
from keypad.core.fuzzy import FuzzyMatcher
from keypad.core.executors import future_wrap
from keypad.core.attributed_string import AttributedString
@lazy
def lexer():
from . import bourne_lexer
return bourne_lexer.Shell
class GetManPage:
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, ns):
with subprocess.Popen(['man', self.cmd], stdout=subprocess.PIPE) as proc:
out, _ = proc.communicate()
import re
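        # 'man' renders bold/underline with backspace overstriking ("X\x08X", "_\x08X");
        # strip those pairs so plain text remains.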
return [re.subn('.\x08', '', out.decode())[0]]
class ShellCompletionResults(AbstractCompletionResults):
def __init__(self, token_start, results, prox):
'''
token_start - the (line, col) position at which the token being completed starts
'''
super().__init__(token_start)
self.results = [(AttributedString(x.decode()),) for x in results]
self._prox = prox
def doc_async(self, index):
'''
Return a Future for the documentation for a given completion result as a list of
AttributedString.
'''
return self._prox.submit(GetManPage(self.text(index)))
@property
def rows(self):
'''
Return a list of tuples of AttributedString containing the contents of
each column for each row in the completion results.
'''
return self._filtered.rows
def text(self, index):
'''
Return the text that should be inserted for the given completion.
'''
return self._filtered.rows[index][0].text
def filter(self, text=''):
'''
Filter the completion results using the given text.
'''
self._filtered = FuzzyMatcher(text).filter(self.results, key=lambda x: x[0].text)
self._filtered.sort(lambda item: len(item[0].text))
def dispose(self):
pass
class GetPathItems:
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, ns):
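        # Ask bash's "compgen -c" for the command names that start with the prefix.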
with subprocess.Popen(['bash',
'-c',
'compgen -c ' + shlex.quote(self.prefix)],
stdout=subprocess.PIPE) as proc:
out, _ = proc.communicate()
return [l.strip() for l in out.splitlines()]
class BourneCodeModel(IndentRetainingCodeModel):
completion_triggers = []
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._prox = AsyncServerProxy()
self._prox.start()
def dispose(self):
self._prox.shutdown()
super().dispose()
def highlight(self):
'''
Rehighlight the buffer.
'''
highlighter = SyntaxHighlighter(
'keypad.plugins.shell.syntax',
lexer(),
dict(lexcat=None)
)
highlighter.highlight_buffer(self.buffer)
def completions_async(self, pos):
'''
Return a future to the completions available at the given position in the document.
Raise NotImplementedError if not implemented.
'''
c = Cursor(self.buffer).move(pos)
text_to_pos = c.line.text[:c.x]
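        # Walk backwards from the cursor to find where the token being completed starts.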
for x, ch in reversed(list(enumerate(text_to_pos))):
if ch.isspace():
x += 1
break
else:
x = 0
print('text_to_pos', text_to_pos[x:], pos)
return self._prox.submit(GetPathItems(text_to_pos[x:]),
transform=lambda r: ShellCompletionResults((pos[0], x), r,
self._prox))
| gpl-3.0 | -4,100,649,501,340,423,000 | 26.863014 | 91 | 0.555556 | false |
dmvieira/P.O.D. | func.py | 1 | 5799 | from mergesort import *
def comeca(sequencia,entrada,entrada2,entrada3):
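    # Read a FASTA file (entrada), keep only the characters valid for the chosen
    # alphabet ('r' = RNA, 'p' = protein, anything else = DNA), collapse sequences
    # whose reverse was already seen, then write the cleaned records to entrada2
    # and their occurrence counts (sorted via mergesort.ordena) to entrada3.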
div=open(entrada3,'w')
t=open(entrada,'r')
saida=open(entrada2,'w')
x=t.readlines()
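    # Make sure the last line ends with a newline so every record is sliced uniformly.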
    if x[-1][-1] != '\n':
comp=x[-1][-1]
comp=comp+'\n'
x.insert(-1,comp)
comp=x[-1]
comp=comp+'\n'
del(x[-1])
x.insert(-1,comp)
del(x[-1])
l=[]
b=0
t.close()
if sequencia=='r':
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'u' or x[j][i] == 'U' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N':
y=y+x[j][i]
c=c+y
l.append(c)
elif sequencia=='p':
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'L' or x[j][i] == 'l' or x[j][i] == 'I' or x[j][i] == 'i' or x[j][i] == 'S' or x[j][i] == 's' or x[j][i] == 'T' or x[j][i] == 't' or x[j][i] == 'Y' or x[j][i] == 'y' or x[j][i] == 'M' or x[j][i] == 'm' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'n' or x[j][i] == 'N' or x[j][i] == 'E' or x[j][i] == 'e' or x[j][i] == 'Q' or x[j][i] == 'q' or x[j][i] == 'R' or x[j][i] == 'r' or x[j][i] == 'K' or x[j][i] == 'k' or x[j][i] == 'H' or x[j][i] == 'h' or x[j][i] == 'F' or x[j][i] == 'f' or x[j][i] == 'W' or x[j][i] == 'w' or x[j][i] == 'P' or x[j][i] == 'p' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'z' or x[j][i] == 'Z' or x[j][i] == 'x' or x[j][i] == 'X' or x[j][i] == 'u' or x[j][i] == 'U':
y=y+x[j][i]
c=c+y
l.append(c)
else:
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 't' or x[j][i] == 'T' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N':
y=y+x[j][i]
c=c+y
l.append(c)
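    # Pair each header with its uppercase sequence; if the reversed sequence was
    # already seen, store this one reversed so duplicates collapse onto the same key.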
dec,dic={},{}
for j in range(0,len(l),2):
alta=(l[j+1]).upper()
del(l[j+1])
l.insert(j+1,alta)
        if (l[j+1][::-1]) in dic:
del(l[j+1])
l.insert((j+1),alta[::-1])
d={l[j]:l[j+1]}
dec.update(d)
d={l[j+1]:l[j]}
dic.update(d)
vou=dic.keys()
v=dec.values()
diversidade=[]
dic={}
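    # Re-wrap each sequence at 65 characters per line for FASTA-style output.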
for j in range(0,len(l),2):
alta=(l[j+1])
divo=(len(alta))/65
if divo > 0:
alta2=''
for h in range(1,divo+1):
alta2=alta2+alta[(65*(h-1)):(65*h)]+'\n'
alta=alta2+alta[65*divo:]
del(l[j+1])
l.insert(j+1,alta)
d= {alta:l[j]}
dic.update(d)
key=dic.keys()
value=dic.values()
for j in range(len(key)):
saida.write(value[j]+'\n'+key[j]+'\n')
diversidade.append((v.count(vou[j])))
saida.close()
ordena(diversidade, value, key, div)
div.close()
| gpl-3.0 | -1,694,803,398,801,581,800 | 52.196262 | 904 | 0.272116 | false |
cdriehuys/chmvh-website | chmvh_website/contact/forms.py | 1 | 2333 | import logging
from smtplib import SMTPException
from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader
logger = logging.getLogger("chmvh_website.{0}".format(__name__))
class ContactForm(forms.Form):
captcha = ReCaptchaField()
name = forms.CharField()
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea(attrs={"rows": 5}))
street_address = forms.CharField(required=False)
city = forms.CharField(required=False)
zipcode = forms.CharField(required=False)
template = loader.get_template("contact/email/message.txt")
def clean_city(self):
"""
If no city was provided, use a default string.
"""
if not self.cleaned_data["city"]:
return "<No City Given>"
return self.cleaned_data["city"]
def send_email(self):
assert self.is_valid(), self.errors
subject = "[CHMVH Website] Message from {}".format(
self.cleaned_data["name"]
)
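        # Build an optional postal address block from the street/city/zip fields.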
address_line_2_parts = [self.cleaned_data["city"], "North Carolina"]
if self.cleaned_data["zipcode"]:
address_line_2_parts.append(self.cleaned_data["zipcode"])
address_line_1 = self.cleaned_data["street_address"]
address_line_2 = ", ".join(address_line_2_parts)
address = ""
if address_line_1:
address = "\n".join([address_line_1, address_line_2])
context = {
"name": self.cleaned_data["name"],
"email": self.cleaned_data["email"],
"message": self.cleaned_data["message"],
"address": address,
}
logger.debug("Preparing to send email")
try:
emails_sent = mail.send_mail(
subject,
self.template.render(context),
settings.DEFAULT_FROM_EMAIL,
["info@chapelhillvet.com"],
)
logger.info(
"Succesfully sent email from {0}".format(
self.cleaned_data["email"]
)
)
except SMTPException as e:
emails_sent = 0
logger.exception("Failed to send email.", exc_info=e)
return emails_sent == 1
| mit | 3,360,558,135,283,314,700 | 28.1625 | 76 | 0.582512 | false |