| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Database types we support for out variables
#
# Data types
class DatabaseDataType:
type_name = None
def __init__(self, value=None, size=None):
self.size = size or 1
self.set_value(value)
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
def __str__(self):
return self.type_name
class NUMBER(DatabaseDataType):
type_name = "NUMBER"
class STRING(DatabaseDataType):
type_name = "STRING"
def __init__(self, value=None, size=None):
DatabaseDataType.__init__(self, value=value, size=size)
if not size:
self.size = 4000
class BINARY(DatabaseDataType):
type_name = "BINARY"
class LONG_BINARY(DatabaseDataType):
type_name = "LONG_BINARY"
# XXX More data types to be added as we find need for them
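# A minimal usage sketch (not part of the original module, just illustrative):
# callers construct a type, set a value and read it back; STRING defaults its
# size to 4000 when none is given.
if __name__ == '__main__':
    num = NUMBER()
    num.set_value(42)
    text = STRING(value="hello")
    print(str(num))         # "NUMBER"
    print(num.get_value())  # 42
    print(text.size)        # 4000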
| dmacvicar/spacewalk | backend/server/rhnSQL/sql_types.py | Python | gpl-2.0 | 1,465 | 0 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example targets an ad to a remarketing list.
The first targetable remarketing list, either owned by or shared to the ad's
advertiser, will be used. To create a remarketing list, see
create_remarketing_list.py. To share a remarketing list with the ad's
advertiser, see share_remarketing_list_to_advertiser.py.
"""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to use for targeting')
argparser.add_argument('ad_id', type=int, help='The ID of the ad to target')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.2', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
ad_id = flags.ad_id
try:
# Retrieve the ad.
ad = service.ads().get(profileId=profile_id, id=ad_id).execute()
# Retrieve a single targetable remarketing list for the ad.
lists = service.targetableRemarketingLists().list(
profileId=profile_id, advertiserId=ad['advertiserId'],
maxResults=1).execute()
if lists['targetableRemarketingLists']:
list = lists['targetableRemarketingLists'][0]
# Update the ad with a list targeting expression
      ad['remarketingListExpression'] = { 'expression': list['id'] }
      response = service.ads().update(profileId=profile_id, body=ad).execute()
      print ('Ad %s updated to use remarketing list expression: "%s".'
             % (response['id'],
                response['remarketingListExpression']['expression']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| falbassini/googleads-dfa-reporting-samples | python/v2.2/target_ad_to_remarketing_list.py | Python | apache-2.0 | 2,651 | 0.00679 |
"""
Redis Blueprint
===============
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.redis
settings:
redis:
# bind: 0.0.0.0 # Set the bind address specifically (Default: 127.0.0.1)
"""
import re
from fabric.decorators import task
from fabric.utils import abort
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from . import debian
from refabric.operations import run
__all__ = ['start', 'stop', 'restart', 'setup', 'configure']
blueprint = blueprints.get(__name__)
start = debian.service_task('redis-server', 'start')
stop = debian.service_task('redis-server', 'stop')
restart = debian.service_task('redis-server', 'restart')
@task
def setup():
"""
Install and configure Redis
"""
install()
configure()
def install():
with sudo():
debian.apt_get('install', 'redis-server')
def get_installed_version():
"""
Get installed version as tuple.
Parsed output format:
Redis server v=2.8.4 sha=00000000:0 malloc=jemalloc-3.4.1 bits=64 build=a...
"""
retval = run('redis-server --version')
m = re.match('.+v=(?P<version>[0-9\.]+).+', retval.stdout)
try:
_v = m.group('version')
v = tuple(map(int, str(_v).split('.')))
return v
    except (AttributeError, IndexError):
abort('Failed to get installed redis version')
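# For example, parsing "Redis server v=2.8.4 sha=... bits=64 ..." yields
# (2, 8, 4), which configure() below compares against (2, 4) and (3, 0) to
# pick the matching config template.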
@task
def configure():
"""
Configure Redis
"""
context = {
'bind': blueprint.get('bind', '127.0.0.1')
}
version = get_installed_version()
if version <= (2, 4):
config = 'redis-2.4.conf'
elif version < (3, 0):
config = 'redis-2.8.conf'
else:
config = 'redis-3.conf'
uploads = blueprint.upload(config, '/etc/redis/redis.conf', context)
if uploads:
if debian.lbs_release() >= '16.04':
debian.chown(location='/etc/redis/redis.conf',
owner='redis', group='root')
restart()
| 5monkeys/blues | blues/redis.py | Python | mit | 1,983 | 0.001513 |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'show.views',
url(r'^radioshow/entrylist/$', 'radioshow_entryitem_list', name='radioshow_entryitem_list'),
url(r'^showcontributor/list/(?P<slug>[\w-]+)/$', 'showcontributor_content_list', name='showcontributor_content_list'),
url(r'^showcontributor/appearance/(?P<slug>[\w-]+)/$', 'showcontributor_appearance_list', name='showcontributor_appearance_list'),
url(r'^showcontributor/(?P<slug>[\w-]+)/$', 'showcontributor_detail', name='showcontributor_detail'),
url(r'^showcontributor/content/(?P<slug>[\w-]+)/$', 'showcontributor_content_detail', name='showcontributor_content_detail'),
url(r'^showcontributor/contact/(?P<slug>[\w-]+)/$', 'showcontributor_contact', name='showcontributor_contact'),
)
| praekelt/panya-show | show/urls.py | Python | bsd-3-clause | 804 | 0.007463 |
# Codeforces "Soft Drinking": the toast count appears to be limited by three
# resources: drink (k bottles of l units, nl units per toast), lime slices
# (c limes cut into d slices) and salt (p grams, np grams per toast). The
# smallest limit is shared evenly among the n friends.
n, k, l, c, d, p, nl, np = map(int, raw_input().split())
toasts_by_drink = (k * l) / nl
toasts_by_lime = c * d
toasts_by_salt = p / np
print min(toasts_by_drink, toasts_by_lime, toasts_by_salt) / n
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
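    # bitcoin.conf has no [section] headers, so FakeSecHead wraps the file
    # object and injects a fake "[all]" section that SafeConfigParser accepts.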
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 51142 if testnet else 61142
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
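# Illustrative example (hypothetical UTXOs): with unspent outputs worth 0.5 and
# 0.7 BTC and needed=1.0, the greedy loop above selects both and returns a
# change amount of 0.2 BTC.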
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # the implied fee is whatever the outputs don't account for
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| Marcdnd/cryptoescudo | contrib/spendfrom/spendfrom.py | Python | mit | 10,054 | 0.005968 |
# Copyright 2015-2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import tenacity
from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \
import zmq_dealer_publisher_base
from oslo_messaging._drivers.zmq_driver.client import zmq_receivers
from oslo_messaging._drivers.zmq_driver.client import zmq_routing_table
from oslo_messaging._drivers.zmq_driver.client import zmq_senders
from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class DealerPublisherDirect(zmq_dealer_publisher_base.DealerPublisherBase):
"""DEALER-publisher using direct dynamic connections.
Publishing directly to remote services assumes the following:
- All direct connections are dynamic - so they live per message,
thus each message send executes the following:
* Open a new socket
* Connect to some host got from the RoutingTable
* Send message(s)
* Close connection, destroy socket
- RoutingTable/RoutingTableUpdater implements local cache of
matchmaker (e.g. Redis) for target resolution to the list of
available hosts. Cache updates in a background thread.
- Caching of connections is not appropriate for directly connected
OS services, because finally it results in a full-mesh of
connections between services.
- Yes we lose on performance opening and closing connections
for each message, but that is done intentionally to implement
the dynamic connections concept. The key thought here is to
have minimum number of connected services at the moment.
- Using the local RoutingTable cache is done to optimise access
to the matchmaker so we don't call the matchmaker per each message
"""
def __init__(self, conf, matchmaker):
sender = zmq_senders.RequestSenderDirect(conf, async=True)
receiver = zmq_receivers.ReceiverDirect(conf)
super(DealerPublisherDirect, self).__init__(conf, matchmaker,
sender, receiver)
self.routing_table = zmq_routing_table.RoutingTableAdaptor(
conf, matchmaker, zmq.ROUTER)
def _get_round_robin_host_connection(self, target, socket):
host = self.routing_table.get_round_robin_host(target)
socket.connect_to_host(host)
failover_hosts = self.routing_table.get_all_round_robin_hosts(target)
upper_bound = self.conf.oslo_messaging_zmq.zmq_failover_connections
for host in failover_hosts[:upper_bound]:
socket.connect_to_host(host)
def _get_fanout_connection(self, target, socket):
for host in self.routing_table.get_fanout_hosts(target):
socket.connect_to_host(host)
def acquire_connection(self, request):
if request.msg_type in zmq_names.DIRECT_TYPES:
socket = self.sockets_manager.get_socket()
self._get_round_robin_host_connection(request.target, socket)
return socket
elif request.msg_type in zmq_names.MULTISEND_TYPES:
socket = self.sockets_manager.get_socket(immediate=False)
self._get_fanout_connection(request.target, socket)
return socket
def _finally_unregister(self, socket, request):
super(DealerPublisherDirect, self)._finally_unregister(socket, request)
self.receiver.unregister_socket(socket)
def _do_send(self, socket, request):
if request.msg_type in zmq_names.MULTISEND_TYPES:
for _ in range(socket.connections_count()):
self.sender.send(socket, request)
else:
self.sender.send(socket, request)
def send_request(self, socket, request):
@tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
stop=tenacity.stop_after_delay(
self.conf.rpc_response_timeout))
def send_retrying():
self._do_send(socket, request)
return send_retrying()
def cleanup(self):
self.routing_table.cleanup()
super(DealerPublisherDirect, self).cleanup()
class DealerPublisherDirectStatic(DealerPublisherDirect):
"""DEALER-publisher using direct static connections.
For some reason direct static connections may be also useful.
Assume a case when some agents are not connected with control services
over RPC (Ironic or Cinder+Ceph), and RPC is used only between controllers.
In this case number of RPC connections doesn't matter (very small) so we
can use static connections without fear and have all performance benefits
from it.
"""
def __init__(self, conf, matchmaker):
super(DealerPublisherDirectStatic, self).__init__(conf, matchmaker)
self.fanout_sockets = zmq_sockets_manager.SocketsManager(
conf, matchmaker, zmq.DEALER)
def acquire_connection(self, request):
target_key = zmq_address.target_to_key(
request.target, zmq_names.socket_type_str(zmq.ROUTER))
if request.msg_type in zmq_names.MULTISEND_TYPES:
hosts = self.routing_table.get_fanout_hosts(request.target)
return self.fanout_sockets.get_cached_socket(target_key, hosts,
immediate=False)
else:
hosts = self.routing_table.get_all_round_robin_hosts(
request.target)
return self.sockets_manager.get_cached_socket(target_key, hosts)
def send_request(self, socket, request):
self._do_send(socket, request)
def _finally_unregister(self, socket, request):
self.receiver.untrack_request(request)
def cleanup(self):
self.fanout_sockets.cleanup()
super(DealerPublisherDirectStatic, self).cleanup()
| ozamiatin/oslo.messaging | oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.py | Python | apache-2.0 | 6,687 | 0.00015 |
"""Global test fixtures."""
import uuid
import pytest
from s3keyring.s3 import S3Keyring
from s3keyring.settings import config
from keyring.errors import PasswordDeleteError
@pytest.fixture(scope="module")
def keyring():
config.boto_config.activate_profile("test")
return S3Keyring()
@pytest.yield_fixture(scope="function")
def random_entry(keyring):
service = str(uuid.uuid4())
user = str(uuid.uuid4())
pwd = str(uuid.uuid4())
yield (service, user, pwd)
# Cleanup
try:
keyring.delete_password(service, user)
except PasswordDeleteError as err:
if 'not found' not in err.args[0]:
# It's ok if the entry has been already deleted
raise
| InnovativeTravel/s3-keyring | tests/conftest.py | Python | mit | 714 | 0 |
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.preprocessing import Imputer
from sklearn import linear_model
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn import preprocessing
# Some colors for later
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
###
#load data from a CSV to a dataframe
with open("./lin.csv") as in_data:
crime_data = pd.DataFrame.from_csv(in_data, sep=',')
#crime_data=crime_data.fillna(value=-999)
#load all numeric data into an array. The offense column from the crime data
#is excluded
as_array = np.asfarray(crime_data[['Average Velocity (mph)','Aggressive Turns']])#'Max Velocity', 'Velocity Stdev','Average Acceleration (mph per s)', 'Max Acceleration (mph per s)', ' Acceleration Stdev','Displacement','Total Distance Traveled','Max Direction Change per sec', ' Direction Stdev','Time (s)', 'Turns', 'Aggressive Turns', 'Stops', 'Large Deceleration Events', 'Deceleration Events', 'Max Deceleration Event']])
#number of groups
n_clusters=4
#Correct missing data
imputer = Imputer(missing_values="NaN", strategy="mean")
patched = imputer.fit_transform(as_array)
# Preprocessing tricks
#patched = StandardScaler().fit_transform(patched)
#patched = scale(patched, axis=0, with_mean=True)
patched = preprocessing.normalize(patched, norm='l2')
#min_max_scaler = preprocessing.MinMaxScaler()
#patched = min_max_scaler.fit_transform(patched)
#cluster data
cluster = KMeans(n_clusters=n_clusters)
cluster.fit_transform(patched)
#assigned grouped labels to the crime data
labels = cluster.labels_
#copy dataframe (may be memory intensive but just for illustration)
skid_data = crime_data.copy()
#print pd.Series(classified_data)
#print pd.Series(prediction_data)
skid_data['Cluster Class'] = pd.Series(labels, index=skid_data.index)
print skid_data.describe()
print skid_data
#print list(skid_data.columns)
skid_data.plot( x = 'Aggressive Turns', y = 'Cluster Class', kind = 'scatter')
plt.show()
# Make Predictions
predictions = cluster.predict(patched)
SilhouetteCoefficient = metrics.silhouette_score(patched, labels, metric='euclidean')
print "The Silhouette Coefficient is", SilhouetteCoefficient
model = sm.OLS(labels, patched)
results = model.fit()
print results.summary()
# Find centers
centers = cluster.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
#plt.subplot(1,4,idx+1)
plt.scatter(patched[:, 0], patched[:, 1], color=colors[predictions].tolist(), s=10)
plt.xticks(())
plt.yticks(())
plt.ylabel('$x_1$')
plt.xlabel('$x_0$')
plt.show()
| georgetown-analytics/skidmarks | bin/cluster.py | Python | mit | 2,943 | 0.016989 |
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from scipy.special import gamma
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
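# Quick worked example (illustrative, not part of the original module):
#   Odds(0.75)          -> 3.0   (3:1 in favor)
#   Probability(3)      -> 0.75
#   Probability2(3, 1)  -> 0.75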
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
# When we plot Hist, Pmf and Cdf objects, they don't appear in
# the legend unless we override the default label.
DEFAULT_LABEL = '_nolegend_'
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else DEFAULT_LABEL
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s)' % (cls, str(self.d))
else:
return self.label
def __repr__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s)' % (cls, repr(self.d))
else:
return '%s(%s, %s)' % (cls, repr(self.d), repr(self.label))
def __eq__(self, other):
try:
return self.d == other.d
except AttributeError:
return False
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def SortedItems(self):
"""Gets a sorted sequence of (value, freq/prob) pairs.
        If items are unsortable, the result is unsorted.
"""
def isnan(x):
try:
return math.isnan(x)
except TypeError:
return False
if any([isnan(x) for x in self.Values()]):
msg = 'Keys contain NaN, may not sort correctly.'
logging.warning(msg)
try:
return sorted(self.d.items())
except TypeError:
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
return zip(*self.SortedItems())
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in self.SortedItems():
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def ProbEqual(self, x):
"""Probability that a sample from this Pmf is exactly x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbEqual(self, x)
else:
return self[x]
# NOTE: I've decided to remove the magic comparators because they
# have the side-effect of making Pmf sortable, but in fact they
# don't support sorting.
def Normalize(self, fraction=1):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0:
raise ValueError('Normalize: total probability is zero.')
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
return self.MakeCdf().Sample(n)
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
return sum(p * x for x, p in self.Items())
def Median(self):
"""Computes the median of a PMF.
Returns:
float median
"""
return self.MakeCdf().Percentile(50)
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
return sum(p * (x-mu)**2 for x, p in self.Items())
def Expect(self, func):
"""Computes the expectation of func(x).
Returns:
expectation
"""
return np.sum(p * func(x) for x, p in self.Items())
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def Mode(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
    # The mode of a posterior is the maximum a posteriori probability (MAP)
MAP = Mode
# If the distribution contains likelihoods only, the peak is the
# maximum likelihood estimator.
MaximumLikelihood = Mode
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
__radd__ = __add__
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf[v1 + v2] += p1 * p2
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
if other == 0:
return self.Copy()
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
cdf.ps **= k
return cdf
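# Illustrative sketch (not part of the original module): the operator overloads
# above convolve distributions, e.g. the sum of two fair coins (0=tails, 1=heads):
#   coin = Pmf({0: 0.5, 1: 0.5})
#   two = coin + coin     # AddPmf: P(0)=0.25, P(1)=0.5, P(2)=0.25
#   two.Mean()            # 1.0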
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix[x] += p1 * p2
return mix
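# Illustrative sketch (not part of the original module): a 50/50 mixture of a
# fair four-sided and a fair six-sided die, built from hypothetical Pmfs:
#   d4 = Pmf([1, 2, 3, 4])
#   d6 = Pmf([1, 2, 3, 4, 5, 6])
#   mix = MakeMixture(Pmf({d4: 0.5, d6: 0.5}))
#   mix[1]                # 0.5*(1/4) + 0.5*(1/6) ~= 0.208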
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf:
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else DEFAULT_LABEL
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
else:
return self.label
def __repr__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
else:
return '%s(%s, %s, %s)' % (cls, str(self.xs), str(self.ps),
repr(self.label))
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in zip(self.xs, self.ps):
print(val, prob)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def Values(self, ps=None):
"""Returns InverseCDF(p), the value that corresponds to probability p.
If ps is not provided, returns all values.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
if ps is None:
return self.xs
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
ValueArray = Values
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100)
def Percentiles(self, ps):
"""Returns the value that corresponds to percentiles ps.
Args:
ps: numbers in the range [0, 100]
Returns:
array of values
"""
ps = np.asarray(ps)
return self.Values(ps / 100)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100
def PercentileRanks(self, xs):
"""Returns the percentile ranks of the values in xs.
xs: potential value in the CDF
returns: array of percentile ranks in the range 0 to 100
"""
        return self.Probs(xs) * 100
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
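# Illustrative sketch (not part of the original module): round-tripping between
# Pmf and Cdf with a small sample:
#   pmf = Pmf([1, 2, 2, 3, 5])
#   cdf = Cdf(pmf)
#   cdf.Prob(2)           # 0.6, i.e. P(X <= 2)
#   cdf.Value(0.5)        # 2 (the median)
#   cdf.Percentile(50)    # 2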
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
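# Illustrative sketch (not part of the original module): the Update/Likelihood
# contract, shown with a hypothetical Suite subclass for the dice problem:
#   class Dice(Suite):
#       def Likelihood(self, data, hypo):
#           # hypo is the number of sides, data is the outcome rolled
#           return 0 if data > hypo else 1 / hypo
#   suite = Dice([4, 6, 8, 12, 20])
#   suite.Update(6)       # rolling a 6 rules out the four-sided die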
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
        If options includes low and high, it must also include n;
        in that case the density is evaluated at n locations between
        low and high, including both.
        If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
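# Usage sketch (assumes the Pmf class defined earlier in this module): build a
# small discrete posterior and report its 90% credible interval.
def _ExampleCredibleInterval():
    """Illustrative only; returns (low, high) for a uniform posterior."""
    pmf = Pmf()
    for hypo in [1, 2, 3, 4, 5]:
        pmf.Set(hypo, 1)    # equal prior weight on five hypotheses
    pmf.Normalize()
    return CredibleInterval(pmf, percentage=90)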
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
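# Sketch: for two independent draws the three comparisons partition the
# outcomes, so the three values below sum to 1 (assumes Pmf as above).
def _ExamplePmfComparison():
    d6 = Pmf()
    for k in range(1, 7):
        d6.Set(k, 1)    # fair six-sided die
    d6.Normalize()
    less = PmfProbLess(d6, d6)
    greater = PmfProbGreater(d6, d6)
    equal = PmfProbEqual(d6, d6)
    return less, greater, equal    # about (0.417, 0.417, 0.167)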
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def MakeBinomialPmf(n, p):
"""Evaluates the binomial PMF.
Returns the distribution of successes in n trials with probability p.
"""
pmf = Pmf()
for k in range(n+1):
pmf[k] = stats.binom.pmf(k, n, p)
return pmf
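# Sketch: probability of exactly 5 heads in 10 fair coin flips; indexing a Pmf
# by value is assumed to return its probability, as elsewhere in this module.
def _ExampleBinomial():
    pmf = MakeBinomialPmf(n=10, p=0.5)
    return pmf[5]    # 252/1024, about 0.246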
def EvalGammaPdf(x, a):
"""Computes the Gamma PDF.
x: where to evaluate the PDF
a: parameter of the gamma distribution
returns: float probability
"""
return x**(a-1) * np.exp(-x) / gamma(a)
def MakeGammaPmf(xs, a):
"""Makes a PMF discrete approx to a Gamma distribution.
    xs: values where the PDF is evaluated
    a: shape parameter of the gamma distribution
returns: normalized Pmf
"""
xs = np.asarray(xs)
ps = EvalGammaPdf(xs, a)
pmf = Pmf(dict(zip(xs, ps)))
pmf.Normalize()
return pmf
def EvalGeometricPmf(k, p, loc=0):
"""Evaluates the geometric PMF.
With loc=0: Probability of `k` trials to get one success.
With loc=-1: Probability of `k` trials before first success.
k: number of trials
p: probability of success on each trial
"""
return stats.geom.pmf(k, p, loc=loc)
def MakeGeometricPmf(p, loc=0, high=10):
"""Evaluates the binomial PMF.
With loc=0: PMF of trials to get one success.
With loc=-1: PMF of trials before first success.
p: probability of success
high: upper bound where PMF is truncated
"""
pmf = Pmf()
for k in range(high):
pmf[k] = stats.geom.pmf(k, p, loc=loc)
pmf.Normalize()
return pmf
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
return stats.poisson.pmf(k, lam)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = stats.poisson.pmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
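# Sketch: discrete approximation of a Poisson(2.5) distribution truncated at
# 10 events; after normalization the probability of exactly 2 events is ~0.26.
def _ExamplePoisson():
    pmf = MakePoissonPmf(lam=2.5, high=10)
    return pmf[2]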
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalWeibullPdf(x, lam, k):
"""Computes the Weibull PDF.
x: value
lam: parameter lambda in events per unit time
k: parameter
returns: float probability density
"""
arg = (x / lam)
return k / lam * arg**(k-1) * np.exp(-arg**k)
def EvalWeibullCdf(x, lam, k):
"""Evaluates CDF of the Weibull distribution."""
arg = (x / lam)
return 1 - np.exp(-arg**k)
def MakeWeibullPmf(lam, k, high, n=200):
"""Makes a PMF discrete approx to a Weibull distribution.
lam: parameter lambda in events per unit time
k: parameter
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
xs = np.linspace(0, high, n)
ps = EvalWeibullPdf(xs, lam, k)
ps[np.isinf(ps)] = 0
return Pmf(dict(zip(xs, ps)))
def EvalParetoPdf(x, xm, alpha):
"""Computes the Pareto.
xm: minimum value (scale parameter)
alpha: shape parameter
returns: float probability density
"""
return stats.pareto.pdf(x, alpha, scale=xm)
def MakeParetoPmf(xm, alpha, high, num=101):
"""Makes a PMF discrete approx to a Pareto distribution.
xm: minimum value (scale parameter)
alpha: shape parameter
high: upper bound value
num: number of values
returns: normalized Pmf
"""
xs = np.linspace(xm, high, num)
ps = stats.pareto.pdf(xs, alpha, scale=xm)
pmf = Pmf(dict(zip(xs, ps)))
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
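# Sketch: the CDF and its inverse round-trip; the 97.5th percentile of a
# standard normal is about 1.96.
def _ExampleNormalCdf():
    p = EvalNormalCdf(1.96)             # about 0.975
    x = EvalNormalCdfInverse(0.975)     # about 1.96
    return p, x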
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta:
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def MAP(self):
"""Computes the value with maximum a posteori probability."""
a = self.alpha - 1
b = self.beta - 1
return a / (a + b)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
The result is a little funny, because the values at 0 and 1
are not symmetric. Nevertheless, it is a reasonable discrete
model of the continuous distribution, and behaves well as
the number of values increases.
"""
if label is None and self.label is not None:
label = self.label
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = special.betainc(self.alpha, self.beta, xs)
cdf = Cdf(xs, ps)
return cdf
def Percentile(self, ps):
"""Returns the given percentiles from this distribution.
ps: scalar, array, or list of [0-100]
"""
ps = np.asarray(ps) / 100
xs = special.betaincinv(self.alpha, self.beta, ps)
return xs
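# Sketch: Bayesian update of a coin's probability of heads with a Beta prior.
def _ExampleBeta():
    beta = Beta(1, 1)          # uniform prior
    beta.Update((140, 110))    # observed (heads, tails)
    return beta.Mean()         # posterior mean 141/252, about 0.56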
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
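# Sketch: Dirichlet update for a three-sided die; the marginal distribution of
# each face is a Beta.
def _ExampleDirichlet():
    dirichlet = Dirichlet(3)       # uniform prior over three categories
    dirichlet.Update((5, 3, 2))    # observed counts per face
    return dirichlet.MarginalBeta(0).Mean()    # 6/13, about 0.46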
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.normal(0, jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
    n = int(p * len(t))
    # guard against n == 0, where sorted(t)[n:-n] would be empty
    t = sorted(t)[n:-n] if n else sorted(t)
    return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
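# Sketch: Cohen's d for two small groups; NumPy arrays work because they
# provide the .mean() and .var() used above.
def _ExampleCohenEffectSize():
    group1 = np.array([2.0, 3.0, 4.0, 5.0])
    group2 = np.array([1.0, 2.0, 3.0, 4.0])
    return CohenEffectSize(group1, group2)    # about 0.89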
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
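# Sketch: a small end-to-end least-squares fit using the helpers above.
def _ExampleLeastSquares():
    xs = [1, 2, 3, 4, 5]
    ys = [2.1, 3.9, 6.2, 8.1, 9.8]
    inter, slope = LeastSquares(xs, ys)
    res = Residuals(xs, ys, inter, slope)
    r2 = CoefDetermination(ys, res)
    return inter, slope, r2    # slope near 2, R^2 near 1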
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
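# Sketch: both skewness measures are positive for this right-skewed sample.
def _ExampleSkewness():
    xs = [1, 2, 2, 3, 3, 3, 10]
    return Skewness(xs), PearsonMedianSkewness(xs)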
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float,
double=float, numeric=float)
var_info = []
with open(dct_file, **options) as f:
for line in f:
match = re.search( r'_column\(([^)]*)\)', line)
if not match:
continue
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
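# Sketch with hypothetical file names: parse a Stata dictionary, then use the
# resulting column specs to read the matching fixed-width data file.
def _ExampleReadStataDct():
    dct = ReadStataDct('survey.dct')
    df = dct.ReadFixedWidth('survey.dat')
    return df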
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
        returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column].copy()
weights /= sum(weights)
indices = np.random.choice(df.index, len(df), replace=True, p=weights)
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
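# Sketch: 90% band across simulated curves; each element of ys_seq is one
# simulated y-sequence evaluated on the same x grid.
def _ExamplePercentileRows():
    ys_seq = [np.random.normal(0, 1, 100).cumsum() for _ in range(200)]
    low, high = PercentileRows(ys_seq, [5, 95])
    return low, high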
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
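# Sketch: a concrete HypothesisTest that permutes group labels to test an
# observed difference in means; illustrative subclass, not used elsewhere.
class _ExampleDiffMeansPermute(HypothesisTest):
    """Permutation test for a difference in means between two NumPy arrays."""
    def TestStatistic(self, data):
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
# Example use: _ExampleDiffMeansPermute((arr1, arr2)).PValue(iters=1000)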
def main():
pass
if __name__ == '__main__':
main()
| AllenDowney/MarriageNSFG | thinkstats2.py | Python | mit | 75,264 | 0.000864 |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for notification command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
import time
import uuid
import boto
from gslib.cloud_api_delegator import CloudApiDelegator
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.utils.retry_util import Retry
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
def _LoadNotificationUrl():
return boto.config.get_value('GSUtil', 'test_notification_url')
NOTIFICATION_URL = _LoadNotificationUrl()
class TestNotificationUnit(testcase.GsUtilUnitTestCase):
@mock.patch.object(CloudApiDelegator,
'CreateNotificationConfig',
autospec=True)
def test_notification_splits_dash_m_value_correctly(self,
mock_create_notification):
bucket_uri = self.CreateBucket(bucket_name='foo_notification')
stdout = self.RunCommand(
'notification',
['create', '-f', 'none', '-s', '-m', 'foo:bar:baz',
suri(bucket_uri)],
return_stdout=True)
mock_create_notification.assert_called_once_with(
mock.ANY, # Client instance.
'foo_notification',
pubsub_topic=mock.ANY,
payload_format=mock.ANY,
custom_attributes={'foo': 'bar:baz'},
event_types=None,
object_name_prefix=mock.ANY,
provider=mock.ANY)
class TestNotification(testcase.GsUtilIntegrationTestCase):
"""Integration tests for notification command."""
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_watch_bucket(self):
"""Tests creating a notification channel on a bucket."""
bucket_uri = self.CreateBucket()
self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)])
identifier = str(uuid.uuid4())
token = str(uuid.uuid4())
stderr = self.RunGsUtil([
'notification', 'watchbucket', '-i', identifier, '-t', token,
NOTIFICATION_URL,
suri(bucket_uri)
],
return_stderr=True)
self.assertIn('token: %s' % token, stderr)
self.assertIn('identifier: %s' % identifier, stderr)
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_stop_channel(self):
"""Tests stopping a notification channel on a bucket."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)],
return_stderr=True)
channel_id = re.findall(r'channel identifier: (?P<id>.*)', stderr)
self.assertEqual(len(channel_id), 1)
resource_id = re.findall(r'resource identifier: (?P<id>.*)', stderr)
self.assertEqual(len(resource_id), 1)
channel_id = channel_id[0]
resource_id = resource_id[0]
self.RunGsUtil(['notification', 'stopchannel', channel_id, resource_id])
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_list_one_channel(self):
"""Tests listing notification channel on a bucket."""
# TODO(b/132277269): Re-enable these once the service-side bug is fixed.
return unittest.skip('Functionality has been disabled due to b/132277269')
bucket_uri = self.CreateBucket()
# Set up an OCN (object change notification) on the newly created bucket.
self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)],
return_stderr=False)
# The OCN listing in the service is eventually consistent. In initial
# tests, it almost never was ready immediately after calling WatchBucket
# above, so we A) sleep for a few seconds before the first OCN listing
# attempt, and B) wrap the OCN listing attempt in retry logic in case
# it raises a BucketNotFoundException (note that RunGsUtil will raise this
# as an AssertionError due to the exit status not being 0).
@Retry(AssertionError, tries=3, timeout_secs=5)
def _ListObjectChangeNotifications():
stderr = self.RunGsUtil(['notification', 'list', '-o',
suri(bucket_uri)],
return_stderr=True)
return stderr
time.sleep(5)
stderr = _ListObjectChangeNotifications()
channel_id = re.findall(r'Channel identifier: (?P<id>.*)', stderr)
self.assertEqual(len(channel_id), 1)
resource_id = re.findall(r'Resource identifier: (?P<id>.*)', stderr)
self.assertEqual(len(resource_id), 1)
push_url = re.findall(r'Application URL: (?P<id>.*)', stderr)
self.assertEqual(len(push_url), 1)
subscriber_email = re.findall(r'Created by: (?P<id>.*)', stderr)
self.assertEqual(len(subscriber_email), 1)
creation_time = re.findall(r'Creation time: (?P<id>.*)', stderr)
self.assertEqual(len(creation_time), 1)
def test_invalid_subcommand(self):
stderr = self.RunGsUtil(['notification', 'foo', 'bar', 'baz'],
return_stderr=True,
expected_status=1)
self.assertIn('Invalid subcommand', stderr)
| GoogleCloudPlatform/gsutil | gslib/tests/test_notification.py | Python | apache-2.0 | 6,042 | 0.002648 |
import logging
import os
import datetime
import tba_config
import time
import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from consts.event_type import EventType
from consts.media_type import MediaType
from consts.media_tag import MediaTag
from datafeeds.datafeed_fms_api import DatafeedFMSAPI
from datafeeds.datafeed_first_elasticsearch import DatafeedFIRSTElasticSearch
from datafeeds.datafeed_tba import DatafeedTba
from datafeeds.datafeed_resource_library import DatafeedResourceLibrary
from helpers.district_manipulator import DistrictManipulator
from helpers.event_helper import EventHelper
from helpers.event_manipulator import EventManipulator
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.match_manipulator import MatchManipulator
from helpers.match_helper import MatchHelper
from helpers.award_manipulator import AwardManipulator
from helpers.media_manipulator import MediaManipulator
from helpers.team_manipulator import TeamManipulator
from helpers.district_team_manipulator import DistrictTeamManipulator
from helpers.robot_manipulator import RobotManipulator
from helpers.event.offseason_event_helper import OffseasonEventHelper
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.district_team import DistrictTeam
from models.event import Event
from models.event_details import EventDetails
from models.event_team import EventTeam
from models.media import Media
from models.robot import Robot
from models.sitevar import Sitevar
from models.team import Team
from sitevars.website_blacklist import WebsiteBlacklist
class FMSAPIAwardsEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing tasks to fetch awards
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
else:
event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_awards/%s' % (event.key_name),
method='GET')
template_values = {
'events': events,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_awards_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIAwardsGet(webapp.RequestHandler):
"""
Handles updating awards
"""
def get(self, event_key):
datafeed = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
awards = datafeed.getAwards(event)
if event and event.remap_teams:
EventHelper.remapteams_awards(awards, event.remap_teams)
new_awards = AwardManipulator.createOrUpdate(awards)
if new_awards is None:
new_awards = []
elif type(new_awards) != list:
new_awards = [new_awards]
# create EventTeams
team_ids = set()
for award in new_awards:
for team in award.team_list:
team_ids.add(team.id())
teams = TeamManipulator.createOrUpdate([Team(
id=team_id,
team_number=int(team_id[3:]))
for team_id in team_ids])
if teams:
if type(teams) is not list:
teams = [teams]
event_teams = EventTeamManipulator.createOrUpdate([EventTeam(
id=event_key + "_" + team.key.id(),
event=event.key,
team=team.key,
year=event.year)
for team in teams])
template_values = {
'awards': new_awards,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_awards_get.html')
self.response.out.write(template.render(path, template_values))
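# Wiring sketch (hypothetical route patterns; the real mappings live in the
# application's route configuration): the enqueue handler above fans out one
# task per event to '/tasks/get/fmsapi_awards/<event key>', which is served by
# FMSAPIAwardsGet, e.g.
#   webapp.WSGIApplication([
#       ('/tasks/enqueue/fmsapi_awards/(.*)', FMSAPIAwardsEnqueue),
#       ('/tasks/get/fmsapi_awards/(.*)', FMSAPIAwardsGet),
#   ])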
class FMSAPIEventAlliancesEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing tasks to fetch alliances
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
elif when == "last_day_only":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official and e.ends_today, events)
else:
event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_event_alliances/' + event.key_name,
method='GET')
template_values = {
'events': events
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_alliances_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventAlliancesGet(webapp.RequestHandler):
"""
Handles updating an event's alliances
"""
def get(self, event_key):
df = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
alliance_selections = df.getEventAlliances(event_key)
if event and event.remap_teams:
EventHelper.remapteams_alliances(alliance_selections, event.remap_teams)
event_details = EventDetails(
id=event_key,
alliance_selections=alliance_selections
)
EventDetailsManipulator.createOrUpdate(event_details)
template_values = {'alliance_selections': alliance_selections,
'event_name': event_details.key.id()}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_alliances_get.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventRankingsEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing tasks to fetch rankings
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
else:
event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_event_rankings/' + event.key_name,
method='GET')
template_values = {
'events': events,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_rankings_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventRankingsGet(webapp.RequestHandler):
"""
Handles updating an event's rankings
"""
def get(self, event_key):
df = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
rankings, rankings2 = df.getEventRankings(event_key)
if event and event.remap_teams:
EventHelper.remapteams_rankings(rankings, event.remap_teams)
EventHelper.remapteams_rankings2(rankings2, event.remap_teams)
event_details = EventDetails(
id=event_key,
rankings=rankings,
rankings2=rankings2
)
EventDetailsManipulator.createOrUpdate(event_details)
template_values = {'rankings': rankings,
'event_name': event_details.key.id()}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_rankings_get.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIMatchesEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing tasks to fetch match results
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
else:
event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_matches/' + event.key_name,
method='GET')
template_values = {
'events': events,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_matches_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIMatchesGet(webapp.RequestHandler):
"""
Handles updating matches
"""
def get(self, event_key):
df = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
matches = MatchHelper.deleteInvalidMatches(
df.getMatches(event_key),
Event.get_by_id(event_key)
)
if event and event.remap_teams:
EventHelper.remapteams_matches(matches, event.remap_teams)
new_matches = MatchManipulator.createOrUpdate(matches)
template_values = {
'matches': new_matches,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_matches_get.html')
self.response.out.write(template.render(path, template_values))
# TODO: Currently unused
# class TeamDetailsEnqueue(webapp.RequestHandler):
# """
# Handles enqueing updates to individual teams
# """
# def get(self):
# offset = int(self.request.get("offset", 0))
# team_keys = Team.query().fetch(1000, offset=int(offset), keys_only=True)
# teams = ndb.get_multi(team_keys)
# for team in teams:
# taskqueue.add(
# queue_name='frc-api',
# url='/tasks/get/fmsapi_team_details/' + team.key_name,
# method='GET')
# # FIXME omg we're just writing out? -gregmarra 2012 Aug 26
# self.response.out.write("%s team gets have been enqueued offset from %s.<br />" % (len(teams), offset))
# self.response.out.write("Reload with ?offset=%s to enqueue more." % (offset + len(teams)))
# class TeamDetailsRollingEnqueue(webapp.RequestHandler):
# """
# Handles enqueing updates to individual teams
# Enqueues a certain fraction of teams so that all teams will get updated
# every PERIOD days.
# """
# PERIOD = 14 # a particular team will be updated every PERIOD days
# def get(self):
# now_epoch = time.mktime(datetime.datetime.now().timetuple())
# bucket_num = int((now_epoch / (60 * 60 * 24)) % self.PERIOD)
# highest_team_key = Team.query().order(-Team.team_number).fetch(1, keys_only=True)[0]
# highest_team_num = int(highest_team_key.id()[3:])
# bucket_size = int(highest_team_num / (self.PERIOD)) + 1
# min_team = bucket_num * bucket_size
# max_team = min_team + bucket_size
# team_keys = Team.query(Team.team_number >= min_team, Team.team_number < max_team).fetch(1000, keys_only=True)
# teams = ndb.get_multi(team_keys)
# for team in teams:
# taskqueue.add(
# queue_name='datafeed',
# url='/tasks/get/fmsapi_team_details/' + team.key_name,
# method='GET')
# # FIXME omg we're just writing out? -fangeugene 2013 Nov 6
# self.response.out.write("Bucket number {} out of {}<br>".format(bucket_num, self.PERIOD))
# self.response.out.write("{} team gets have been enqueued in the interval [{}, {}).".format(len(teams), min_team, max_team))
class TeamDetailsGet(webapp.RequestHandler):
"""
Fetches team details
FMSAPI should be trusted over FIRSTElasticSearch
"""
def get(self, key_name):
existing_team = Team.get_by_id(key_name)
fms_df = DatafeedFMSAPI('v2.0')
df2 = DatafeedFIRSTElasticSearch()
year = datetime.date.today().year
fms_details = fms_df.getTeamDetails(year, key_name)
if fms_details:
team, district_team, robot = fms_details[0]
else:
team = None
district_team = None
robot = None
if team:
team = TeamManipulator.mergeModels(team, df2.getTeamDetails(existing_team))
else:
team = df2.getTeamDetails(existing_team)
if team:
team = TeamManipulator.createOrUpdate(team)
# Clean up junk district teams
# https://www.facebook.com/groups/moardata/permalink/1310068625680096/
dt_keys = DistrictTeam.query(
DistrictTeam.team == existing_team.key,
DistrictTeam.year == year).fetch(keys_only=True)
keys_to_delete = set()
for dt_key in dt_keys:
if not district_team or dt_key.id() != district_team.key.id():
keys_to_delete.add(dt_key)
DistrictTeamManipulator.delete_keys(keys_to_delete)
if district_team:
district_team = DistrictTeamManipulator.createOrUpdate(district_team)
if robot:
robot = RobotManipulator.createOrUpdate(robot)
template_values = {
'key_name': key_name,
'team': team,
'success': team is not None,
'district': district_team,
'robot': robot,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_team_details_get.html')
self.response.out.write(template.render(path, template_values))
class TeamAvatarGet(webapp.RequestHandler):
"""
Fetches team avatar
Doesn't currently use FIRSTElasticSearch
"""
def get(self, key_name):
fms_df = DatafeedFMSAPI('v2.0')
year = datetime.date.today().year
team = Team.get_by_id(key_name)
avatar, keys_to_delete = fms_df.getTeamAvatar(year, key_name)
if avatar:
MediaManipulator.createOrUpdate(avatar)
MediaManipulator.delete_keys(keys_to_delete)
template_values = {
'key_name': key_name,
'team': team,
'success': avatar is not None,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_team_avatar_get.html')
self.response.out.write(template.render(path, template_values))
class EventListCurrentEnqueue(webapp.RequestHandler):
"""
Enqueue fetching events for years between current year and max year
"""
def get(self):
sv = Sitevar.get_by_id('apistatus')
current_year = sv.contents['current_season']
max_year = sv.contents['max_season']
years = range(current_year, max_year + 1)
for year in years:
taskqueue.add(
queue_name='datafeed',
target='backend-tasks',
url='/backend-tasks/get/event_list/%d' % year,
method='GET'
)
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
self.response.out.write("Enqueued fetching events for {}".format(years))
class EventListEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing the fetching of a year's worth of events from FMSAPI
"""
def get(self, year):
taskqueue.add(
queue_name='datafeed',
target='backend-tasks',
url='/backend-tasks/get/event_list/' + year,
method='GET'
)
template_values = {
'year': year,
'event_count': year
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_events_details_enqueue.html')
self.response.out.write(template.render(path, template_values))
class EventListGet(webapp.RequestHandler):
"""
Fetch all events for a given year via the FRC Events API.
"""
def get(self, year):
df_config = Sitevar.get_or_insert('event_list_datafeed_config')
df = DatafeedFMSAPI('v2.0')
df2 = DatafeedFIRSTElasticSearch()
fmsapi_events, event_list_districts = df.getEventList(year)
if df_config.contents.get('enable_es') == True:
elasticsearch_events = df2.getEventList(year)
else:
elasticsearch_events = []
# All regular-season events can be inserted without any work involved.
# We need to de-duplicate offseason events from the FRC Events API with a different code than the TBA event code
fmsapi_events_offseason = [e for e in fmsapi_events if e.is_offseason]
event_keys_to_put = set([e.key_name for e in fmsapi_events]) - set(
[e.key_name for e in fmsapi_events_offseason])
events_to_put = [e for e in fmsapi_events if e.key_name in event_keys_to_put]
matched_offseason_events, new_offseason_events = \
OffseasonEventHelper.categorize_offseasons(int(year), fmsapi_events_offseason)
# For all matched offseason events, make sure the FIRST code matches the TBA FIRST code
for tba_event, first_event in matched_offseason_events:
tba_event.first_code = first_event.event_short
events_to_put.append(tba_event) # Update TBA events - discard the FIRST event
# For all new offseason events we can't automatically match, create suggestions
SuggestionCreator.createDummyOffseasonSuggestions(new_offseason_events)
merged_events = EventManipulator.mergeModels(
list(events_to_put),
elasticsearch_events) if elasticsearch_events else list(
events_to_put)
events = EventManipulator.createOrUpdate(merged_events) or []
fmsapi_districts = df.getDistrictList(year)
merged_districts = DistrictManipulator.mergeModels(fmsapi_districts, event_list_districts)
if merged_districts:
districts = DistrictManipulator.createOrUpdate(merged_districts)
else:
districts = []
# Fetch event details for each event
for event in events:
taskqueue.add(
queue_name='datafeed',
target='backend-tasks',
url='/backend-tasks/get/event_details/'+event.key_name,
method='GET'
)
template_values = {
"events": events,
"districts": districts,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/fms_event_list_get.html')
self.response.out.write(template.render(path, template_values))
class DistrictListGet(webapp.RequestHandler):
"""
Fetch one year of districts only from FMS API
"""
def get(self, year):
df = DatafeedFMSAPI('v2.0')
fmsapi_districts = df.getDistrictList(year)
districts = DistrictManipulator.createOrUpdate(fmsapi_districts)
template_values = {
"districts": districts,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/fms_district_list_get.html')
self.response.out.write(template.render(path, template_values))
class EventDetailsEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing the fetching of event details, event teams, and team details
"""
def get(self, event_key):
taskqueue.add(
queue_name='datafeed',
target='backend-tasks',
url='/backend-tasks/get/event_details/'+event_key,
method='GET')
template_values = {
'event_key': event_key
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/fmsapi_eventteams_enqueue.html')
self.response.out.write(template.render(path, template_values))
class EventDetailsGet(webapp.RequestHandler):
"""
Fetch event details, event teams, and team details
FMSAPI should be trusted over FIRSTElasticSearch
"""
def get(self, event_key):
df = DatafeedFMSAPI('v2.0')
df2 = DatafeedFIRSTElasticSearch()
event = Event.get_by_id(event_key)
# Update event
fmsapi_events, fmsapi_districts = df.getEventDetails(event_key)
elasticsearch_events = df2.getEventDetails(event)
updated_event = EventManipulator.mergeModels(
fmsapi_events,
elasticsearch_events)
if updated_event:
event = EventManipulator.createOrUpdate(updated_event)
DistrictManipulator.createOrUpdate(fmsapi_districts)
models = df.getEventTeams(event_key)
teams = []
district_teams = []
robots = []
for group in models:
# models is a list of tuples (team, districtTeam, robot)
if isinstance(group[0], Team):
teams.append(group[0])
if isinstance(group[1], DistrictTeam):
district_teams.append(group[1])
if isinstance(group[2], Robot):
robots.append(group[2])
# Merge teams
teams = TeamManipulator.mergeModels(teams, df2.getEventTeams(event))
# Write new models
if teams and event.year == tba_config.MAX_YEAR: # Only update from latest year
teams = TeamManipulator.createOrUpdate(teams)
district_teams = DistrictTeamManipulator.createOrUpdate(district_teams)
robots = RobotManipulator.createOrUpdate(robots)
if not teams:
# No teams found registered for this event
teams = []
if type(teams) is not list:
teams = [teams]
# Build EventTeams
cmp_hack_sitevar = Sitevar.get_or_insert('cmp_registration_hacks')
events_without_eventteams = cmp_hack_sitevar.contents.get('skip_eventteams', []) \
if cmp_hack_sitevar else []
skip_eventteams = event_key in events_without_eventteams
event_teams = [EventTeam(
id=event.key_name + "_" + team.key_name,
event=event.key,
team=team.key,
year=event.year)
for team in teams] if not skip_eventteams else []
# Delete eventteams of teams that are no longer registered
if event_teams and not skip_eventteams:
existing_event_team_keys = set(EventTeam.query(EventTeam.event == event.key).fetch(1000, keys_only=True))
event_team_keys = set([et.key for et in event_teams])
et_keys_to_delete = existing_event_team_keys.difference(event_team_keys)
EventTeamManipulator.delete_keys(et_keys_to_delete)
event_teams = EventTeamManipulator.createOrUpdate(event_teams)
if type(event_teams) is not list:
event_teams = [event_teams]
if event.year in {2018, 2019, 2020}:
avatars, keys_to_delete = df.getEventTeamAvatars(event.key_name)
if avatars:
MediaManipulator.createOrUpdate(avatars)
MediaManipulator.delete_keys(keys_to_delete)
template_values = {
'event': event,
'event_teams': event_teams,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_details_get.html')
self.response.out.write(template.render(path, template_values))
class DistrictRankingsGet(webapp.RequestHandler):
"""
Fetch district rankings from FIRST
This data does not have full pre-event point breakdowns, but it does contain
things like CMP advancement
"""
def get(self, district_key):
df = DatafeedFMSAPI('v2.0')
district_with_rankings = df.getDistrictRankings(district_key)
districts = []
if district_with_rankings:
districts = DistrictManipulator.createOrUpdate(district_with_rankings)
template_values = {
"districts": [districts],
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/fms_district_list_get.html')
self.response.out.write(template.render(path, template_values))
class TbaVideosEnqueue(webapp.RequestHandler):
"""
    Handles enqueuing the grabbing of tba_videos for Matches at individual Events.
"""
def get(self):
events = Event.query()
for event in events:
taskqueue.add(
url='/tasks/get/tba_videos/' + event.key_name,
method='GET')
template_values = {
'event_count': Event.query().count(),
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/tba_videos_enqueue.html')
self.response.out.write(template.render(path, template_values))
class TbaVideosGet(webapp.RequestHandler):
"""
Handles reading a TBA video listing page and updating the match objects in the datastore as needed.
"""
def get(self, event_key):
df = DatafeedTba()
event = Event.get_by_id(event_key)
match_filetypes = df.getVideos(event)
if match_filetypes:
matches_to_put = []
for match in event.matches:
if match.tba_videos != match_filetypes.get(match.key_name, []):
match.tba_videos = match_filetypes.get(match.key_name, [])
match.dirty = True
matches_to_put.append(match)
MatchManipulator.createOrUpdate(matches_to_put)
tbavideos = match_filetypes.items()
else:
logging.info("No tbavideos found for event " + event.key_name)
tbavideos = []
template_values = {
'tbavideos': tbavideos,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/tba_videos_get.html')
self.response.out.write(template.render(path, template_values))
class HallOfFameTeamsGet(webapp.RequestHandler):
"""
Handles scraping the list of Hall of Fame teams from FIRST resource library.
"""
def get(self):
df = DatafeedResourceLibrary()
teams = df.getHallOfFameTeams()
if teams:
media_to_update = []
for team in teams:
team_reference = Media.create_reference('team', team['team_id'])
video_foreign_key = team['video']
if video_foreign_key:
media_to_update.append(Media(id=Media.render_key_name(MediaType.YOUTUBE_VIDEO, video_foreign_key),
media_type_enum=MediaType.YOUTUBE_VIDEO,
media_tag_enum=[MediaTag.CHAIRMANS_VIDEO],
references=[team_reference],
year=team['year'],
foreign_key=video_foreign_key))
presentation_foreign_key = team['presentation']
if presentation_foreign_key:
media_to_update.append(Media(id=Media.render_key_name(MediaType.YOUTUBE_VIDEO, presentation_foreign_key),
media_type_enum=MediaType.YOUTUBE_VIDEO,
media_tag_enum=[MediaTag.CHAIRMANS_PRESENTATION],
references=[team_reference],
year=team['year'],
foreign_key=presentation_foreign_key))
essay_foreign_key = team['essay']
if essay_foreign_key:
media_to_update.append(Media(id=Media.render_key_name(MediaType.EXTERNAL_LINK, essay_foreign_key),
media_type_enum=MediaType.EXTERNAL_LINK,
media_tag_enum=[MediaTag.CHAIRMANS_ESSAY],
references=[team_reference],
year=team['year'],
foreign_key=essay_foreign_key))
MediaManipulator.createOrUpdate(media_to_update)
else:
logging.info("No Hall of Fame teams found")
teams = []
template_values = {
'teams': teams,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/hall_of_fame_teams_get.html')
self.response.out.write(template.render(path, template_values))
class TeamBlacklistWebsiteDo(webapp.RequestHandler):
"""
Blacklist the current website for a team
"""
def get(self, key_name):
team = Team.get_by_id(key_name)
if team.website:
WebsiteBlacklist.blacklist(team.website)
self.redirect('/backend-tasks/get/team_details/{}'.format(key_name))
| bdaroz/the-blue-alliance | controllers/datafeed_controller.py | Python | mit | 31,351 | 0.002775 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""JS/CSS bundles for Records."""
from __future__ import absolute_import, print_function
from flask_assets import Bundle
from invenio_assets import NpmBundle
stats_js = NpmBundle(
"node_modules/invenio-charts-js/dist/lib.bundle.js",
"js/cds_records/stats.js",
output="gen/cds.records.stats.%(version)s.js",
npm={
"invenio-charts-js": "^0.2.2",
},
)
stats_css = Bundle(
Bundle(
"node_modules/invenio-charts-js/src/styles/styles.scss",
"scss/stats.scss",
filters="node-scss,cleancssurl",
),
output="gen/cds.stats.%(version)s.css",
)
js = NpmBundle(
Bundle(
"node_modules/cds/dist/cds.js",
"node_modules/angular-sanitize/angular-sanitize.js",
"node_modules/angular-strap/dist/angular-strap.js",
"node_modules/invenio-files-js/dist/invenio-files-js.js",
"node_modules/ngmodal/dist/ng-modal.js",
"js/cds_records/main.js",
"js/cds_records/user_actions_logger.js",
filters="jsmin",
),
depends=("node_modules/cds/dist/*.js",),
filters="jsmin",
output="gen/cds.record.%(version)s.js",
npm={
"angular": "~1.4.10",
"angular-sanitize": "~1.4.10",
"angular-loading-bar": "~0.9.0",
"cds": "~0.2.0",
"ng-dialog": "~0.6.0",
"ngmodal": "~2.0.1",
},
)
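# Hedged usage note (added for illustration; not part of the original module):
# bundles defined this way are normally picked up through the package's
# ``invenio_assets.bundles`` entry point, or can be registered by hand on a
# Flask-Assets environment. The registration names below are illustrative.
#
#   from flask_assets import Environment
#   assets = Environment(app)
#   assets.register('cds_records_stats_js', stats_js)
#   assets.register('cds_records_stats_css', stats_css)
#   assets.register('cds_records_js', js)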
| CERNDocumentServer/cds-videos | cds/modules/records/bundles.py | Python | gpl-2.0 | 2,122 | 0 |
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import LanguageRelation
from molo.core.models import Page
class Command(BaseCommand):
def handle(self, *args, **options):
for relation in LanguageRelation.objects.all():
if relation.page and relation.language:
page = Page.objects.get(pk=relation.page.pk).specific
page.language = relation.language
page.save()
else:
self.stdout.write(self.style.NOTICE(
'Relation with pk "%s" is missing either page/language'
% (relation.pk)))
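# Hedged usage note (added; not part of the original file): as a Django
# management command living in management/commands/add_language_to_pages.py,
# it is typically invoked with:
#
#   python manage.py add_language_to_pages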
| praekelt/molo | molo/core/management/commands/add_language_to_pages.py | Python | bsd-2-clause | 702 | 0 |
'''
@author: Michael Wan
@since : 2014-11-08
'''
from math import log
import operator
def createDataSet():
dataSet = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
#change to discrete values
return dataSet, labels
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
    for featVec in dataSet: # count the number of unique elements and their occurrences
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(prob,2) #log base 2
return shannonEnt
def splitDataSet(dataSet, axis, value):
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis] #chop out axis used for splitting
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1 #the last column is used for the labels
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0; bestFeature = -1
for i in range(numFeatures): #iterate over all the features
featList = [example[i] for example in dataSet]#create a list of all the examples of this feature
uniqueVals = set(featList) #get a set of unique values
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy #calculate the info gain; ie reduction in entropy
if (infoGain > bestInfoGain): #compare this to the best gain so far
bestInfoGain = infoGain #if better than current best, set to best
bestFeature = i
return bestFeature #returns an integer
def majorityCnt(classList):
classCount={}
for vote in classList:
if vote not in classCount.keys(): classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet,labels):
classList = [example[-1] for example in dataSet]
if classList.count(classList[0]) == len(classList):
return classList[0]#stop splitting when all of the classes are equal
if len(dataSet[0]) == 1: #stop splitting when there are no more features in dataSet
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
del(labels[bestFeat])
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:] #copy all of labels, so trees don't mess up existing labels
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),subLabels)
return myTree
def classify(inputTree,featLabels,testVec):
firstStr = inputTree.keys()[0]
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
key = testVec[featIndex]
valueOfFeat = secondDict[key]
if isinstance(valueOfFeat, dict):
classLabel = classify(valueOfFeat, featLabels, testVec)
else: classLabel = valueOfFeat
return classLabel
def storeTree(inputTree,filename):
import pickle
fw = open(filename,'w')
pickle.dump(inputTree,fw)
fw.close()
def grabTree(filename):
import pickle
fr = open(filename)
return pickle.load(fr) | onehao/opensource | pyml/inaction/ch03/decisiontree/trees.py | Python | apache-2.0 | 4,087 | 0.015659 |
import random, inspect
from sched import scheduler
from time import time, sleep
from datetime import datetime
####################################################################################################
# Minimal implementation of the signaling library
class Signal(object):
def __init__(self, name):
self.name = name
self.receivers = {}
# This is all we need for a callback function to be registered for a signal:
    def connect(self, receiver):
        # print(id(receiver), receiver)
        self.receivers.setdefault(id(receiver), receiver)
        return receiver
    # disconnect() is needed by the demo below, which unregisters callbacks
    # again; dropping a receiver that was never connected is a no-op.
    def disconnect(self, receiver):
        self.receivers.pop(id(receiver), None)
    # When a person expends effort and calls their signal.send(), they really iterate through their
    # receivers (callback functions) and __call__() each one, passing in themselves and **kwargs
def send(self, sender, **kwargs):
# For Edwin: Understand This Syntax
# print("{} ({}) has the following receivers: {}".format(self.name, id(self), self.receivers))
if not self.receivers:
return []
# below is an example of tuple unpacking in python
# print ("wheee {}".format([(receiver, receiver(sender, **kwargs)) for k, receiver in self.receivers.items()]))
return [(receiver, receiver(sender, **kwargs)) for k, receiver in self.receivers.items()]
# Makes Signals(name) singletons
class Namespace(dict):
def signal(self, name):
try:
return self[name]
except KeyError:
return self.setdefault(name, Signal(name))
signal = Namespace().signal
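# Hedged illustration (added; not in the original script): Namespace.signal()
# caches Signal objects by name, so repeated lookups return the same instance:
#
#   signal('demo') is signal('demo')   # -> True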
####################################################################################################
## Minimal implementation of a Person class,
class Person(object):
def __init__(self, name):
self.name = name
self._energy = 100
@property
def energy(self):
return self._energy
def work(self):
effort = random.randint(-10, 10)
self._energy += effort
# People will emit a signal when they expend effort
if effort != 0:
# the signal will call the callback functon provided by the receiver on connect()
signal(self.name).send(self, effort=effort)
####################################################################################################
## Now the script - Let's start with the function we'll call to subscribe to signals and callback
def seamus_callback1(sender):
print("calling seamus_callback1! sender: {}".format(sender))
def seamus_callback2(sender):
print("calling seamus_callback2! sender: {}".format(sender))
def seamus_callback3(sender):
print("calling seamus_callback3! sender: {}".format(sender))
seamus = Person('seamus')
seamus_signal = signal(seamus.name)
print("{} is calling send. Debug: sender: {} signal: {} output: {}!".format(seamus.name, seamus, seamus_signal, seamus_signal.send(seamus)))
seamus_signal.connect(seamus_callback1)
seamus_signal.connect(seamus_callback2)
seamus_signal.connect(seamus_callback3)
print("{} is calling send again. Debug: sender: {} signal: {} output: {}!".format(seamus.name, seamus, seamus_signal, seamus_signal.send(seamus)))
seamus_signal.disconnect(seamus_callback1)
seamus_signal.disconnect(seamus_callback2)
print("{} is calling send again. Debug: sender: {} signal: {} output: {}!".format(seamus.name, seamus, seamus_signal, seamus_signal.send(seamus)))
## Subscribing to signals
def monitor_changes_in_effort(people):
# For each person, we call the signal method. signal() will either return an existing signal for
    # that person, or return a new signal for that person - hence the singleton comment above.
signals = [signal(person.name) for person in people]
# list comprehension
# signals = [functionToCall() for thing in someList]
# signals = []
# for person in people:
# s = signal(person.name)
# signals.append(s)
# for each signal we just got, let's connect to it and tell it what callback function we want
# to have executed when the signal is emitted.
[s.connect(track_work) for s in signals]
# This is our callback function - we send this to the signal as the callback that we want executed.
# this will handle the signal that the person sends - we know from the Person class that when a
# person expends effort, they emit a signal, passing in themselves and the amount of effort expended.
def track_work(sender, effort):
verb = 'rose' if effort > 0 else 'dropped'
if sender.energy < 100: # and sender not in hardworkers:
hardworkers.add(sender)
else:
hardworkers.discard(sender)
return effort
def print_person(person):
print(person.name)
print(person.energy)
# Creating the people objects from a list of names
people = [Person(name) for name in ['ye', 'bryan', 'andrew', 'edwin',
'jerry', 'jose', 'nathan', 'nate']]
## Set we'll add people whose energy levels have changed
hardworkers = set([])
# Observing the people we just created
monitor_changes_in_effort(people)
# Starting a 2 second loop that makes each person work 20 times
start_time = time()
duration = 0.5
interval = duration / 20
while time() < start_time + duration:
# print('Time: ')
# print(datetime.fromtimestamp(time()))
# [print_person(person) for person in people]
[person.work() for person in people]
sleep(interval - ((time() - start_time) % interval))
# print the list of people who were found to have worked:
print('\n\nThe following people finished the day with less energy than they started:\n')
[print_person(person) for person in hardworkers]
print('\n')
# and that's the gist of things. | dstcontrols/osisoftpy | examples/mini_signal_example.py | Python | apache-2.0 | 5,723 | 0.007863 |
import os
def get_terminal_columns():
terminal_rows, terminal_columns = os.popen('stty size', 'r').read().split()
return int(terminal_columns)
def get_terminal_rows():
terminal_rows, terminal_columns = os.popen('stty size', 'r').read().split()
return int(terminal_rows)
def get_header_l1(lines_list, width=None):
text_output = []
if width is None:
width = get_terminal_columns()
text_output.append('')
text_output.append('%s%s%s' % ('+', '-' * (width-2), '+'))
for line in lines_list:
text_output.append('| {:<{width}}|'.format(line, width=width-3))
text_output.append('%s%s%s' % ('+', '-' * (width - 2), '+'))
text_output.append('')
return '\n'.join(text_output)
def get_header_l2(lines_list, width=None):
text_output = []
if width is None:
width = 0
for line in lines_list:
if len(line) > width:
width = len(line)
width += 5
text_output.append('')
text_output.append('#')
text_output.append('##')
for line in lines_list:
text_output.append('### ' + line)
text_output.append('-' * width)
text_output.append('')
return '\n'.join(text_output)
def get_key_value_adjusted(key, value, key_width):
return '{:>{width}}'.format(key, width=key_width) + ': ' + str(value)
def format_seconds(seconds):
output = []
seconds = int(seconds)
    if seconds > 86400:
        output.append('%s days' % (seconds // 86400))
        seconds %= 86400
    if seconds > 3600:
        output.append('%s hours' % (seconds // 3600))
        seconds %= 3600
    if seconds > 60:
        output.append('%s minutes' % (seconds // 60))
        seconds %= 60
if seconds > 0:
output.append('%s seconds' % seconds)
return ' '.join(output)
def format_documentation_list(links_list):
text_output = ['Documentation:', '']
for l in links_list:
text_output.append('- %s' % l)
return '\n'.join(text_output)
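# Hedged demo (added for illustration; not part of the original module). It
# skips the stty-based helpers so it also runs without a controlling terminal.
if __name__ == '__main__':
    print(format_seconds(90061))                    # 1 days 1 hours 1 minutes 1 seconds
    print(get_key_value_adjusted('uptime', '5 days', 10))
    print(get_header_l2(['Section title']))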
| skomendera/PyMyTools | providers/terminal.py | Python | mit | 2,039 | 0.00049 |
"""Utilities for B2share deposit."""
from flask import request
from werkzeug.local import LocalProxy
from werkzeug.routing import PathConverter
def file_id_to_key(value):
"""Convert file UUID to value if in request context."""
from invenio_files_rest.models import ObjectVersion
_, record = request.view_args['pid_value'].data
if value in record.files:
return value
object_version = ObjectVersion.query.filter_by(
bucket_id=record.files.bucket.id, file_id=value
).first()
if object_version:
return object_version.key
return value
class FileKeyConverter(PathConverter):
"""Convert file UUID for key."""
def to_python(self, value):
"""Lazily convert value from UUID to key if need be."""
return LocalProxy(lambda: file_id_to_key(value))
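# Hedged example (added; not part of the original module): a werkzeug path
# converter like FileKeyConverter is typically registered on the application's
# URL map before routes that use it are defined. The converter name and route
# below are illustrative only.
#
#   app.url_map.converters['file_key'] = FileKeyConverter
#
#   @app.route('/record/<pid_value>/files/<file_key:key>')
#   def get_file(pid_value, key):
#       ...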
| emanueldima/b2share | b2share/modules/deposit/utils.py | Python | gpl-2.0 | 823 | 0 |
#-*-coding=utf-8-*-
class SupportEncodings(object):
"""
Given the support encoding of piconv
"""
supports = []
def __init__(self):
        self.supports = ['ASCII', 'UTF-8', 'UTF-16', 'UTF-32',
                         'BIG5', 'GBK', 'GB2312', 'GB18030', 'EUC-JP', 'SHIFT_JIS', 'ISO-2022-JP',
                         'WINDOWS-1252']
def get_support_encodings(self):
return self.supports
def get_all_coded_character_set(self):
return ['']
"""
437, 500, 500V1, 850, 851, 852, 855, 856, 857, 860, 861, 862, 863, 864, 865,
866, 866NAV, 869, 874, 904, 1026, 1046, 1047, 8859_1, 8859_2, 8859_3, 8859_4,
8859_5, 8859_6, 8859_7, 8859_8, 8859_9, 10646-1:1993, 10646-1:1993/UCS4,
ANSI_X3.4-1968, ANSI_X3.4-1986, ANSI_X3.4, ANSI_X3.110-1983, ANSI_X3.110,
ARABIC, ARABIC7, ARMSCII-8, ASCII, ASMO-708, ASMO_449, BALTIC, BIG-5,
BIG-FIVE, BIG5-HKSCS, BIG5, BIG5HKSCS, BIGFIVE, BRF, BS_4730, CA, CN-BIG5,
CN-GB, CN, CP-AR, CP-GR, CP-HU, CP037, CP038, CP273, CP274, CP275, CP278,
CP280, CP281, CP282, CP284, CP285, CP290, CP297, CP367, CP420, CP423, CP424,
CP437, CP500, CP737, CP770, CP771, CP772, CP773, CP774, CP775, CP803, CP813,
CP819, CP850, CP851, CP852, CP855, CP856, CP857, CP860, CP861, CP862, CP863,
CP864, CP865, CP866, CP866NAV, CP868, CP869, CP870, CP871, CP874, CP875,
CP880, CP891, CP901, CP902, CP903, CP904, CP905, CP912, CP915, CP916, CP918,
CP920, CP921, CP922, CP930, CP932, CP933, CP935, CP936, CP937, CP939, CP949,
CP950, CP1004, CP1008, CP1025, CP1026, CP1046, CP1047, CP1070, CP1079,
CP1081, CP1084, CP1089, CP1097, CP1112, CP1122, CP1123, CP1124, CP1125,
CP1129, CP1130, CP1132, CP1133, CP1137, CP1140, CP1141, CP1142, CP1143,
CP1144, CP1145, CP1146, CP1147, CP1148, CP1149, CP1153, CP1154, CP1155,
CP1156, CP1157, CP1158, CP1160, CP1161, CP1162, CP1163, CP1164, CP1166,
CP1167, CP1250, CP1251, CP1252, CP1253, CP1254, CP1255, CP1256, CP1257,
CP1258, CP1282, CP1361, CP1364, CP1371, CP1388, CP1390, CP1399, CP4517,
CP4899, CP4909, CP4971, CP5347, CP9030, CP9066, CP9448, CP10007, CP12712,
CP16804, CPIBM861, CSA7-1, CSA7-2, CSASCII, CSA_T500-1983, CSA_T500,
CSA_Z243.4-1985-1, CSA_Z243.4-1985-2, CSA_Z243.419851, CSA_Z243.419852,
CSDECMCS, CSEBCDICATDE, CSEBCDICATDEA, CSEBCDICCAFR, CSEBCDICDKNO,
CSEBCDICDKNOA, CSEBCDICES, CSEBCDICESA, CSEBCDICESS, CSEBCDICFISE,
CSEBCDICFISEA, CSEBCDICFR, CSEBCDICIT, CSEBCDICPT, CSEBCDICUK, CSEBCDICUS,
CSEUCKR, CSEUCPKDFMTJAPANESE, CSGB2312, CSHPROMAN8, CSIBM037, CSIBM038,
CSIBM273, CSIBM274, CSIBM275, CSIBM277, CSIBM278, CSIBM280, CSIBM281,
CSIBM284, CSIBM285, CSIBM290, CSIBM297, CSIBM420, CSIBM423, CSIBM424,
CSIBM500, CSIBM803, CSIBM851, CSIBM855, CSIBM856, CSIBM857, CSIBM860,
CSIBM863, CSIBM864, CSIBM865, CSIBM866, CSIBM868, CSIBM869, CSIBM870,
CSIBM871, CSIBM880, CSIBM891, CSIBM901, CSIBM902, CSIBM903, CSIBM904,
CSIBM905, CSIBM918, CSIBM921, CSIBM922, CSIBM930, CSIBM932, CSIBM933,
CSIBM935, CSIBM937, CSIBM939, CSIBM943, CSIBM1008, CSIBM1025, CSIBM1026,
CSIBM1097, CSIBM1112, CSIBM1122, CSIBM1123, CSIBM1124, CSIBM1129, CSIBM1130,
CSIBM1132, CSIBM1133, CSIBM1137, CSIBM1140, CSIBM1141, CSIBM1142, CSIBM1143,
CSIBM1144, CSIBM1145, CSIBM1146, CSIBM1147, CSIBM1148, CSIBM1149, CSIBM1153,
CSIBM1154, CSIBM1155, CSIBM1156, CSIBM1157, CSIBM1158, CSIBM1160, CSIBM1161,
CSIBM1163, CSIBM1164, CSIBM1166, CSIBM1167, CSIBM1364, CSIBM1371, CSIBM1388,
CSIBM1390, CSIBM1399, CSIBM4517, CSIBM4899, CSIBM4909, CSIBM4971, CSIBM5347,
CSIBM9030, CSIBM9066, CSIBM9448, CSIBM12712, CSIBM16804, CSIBM11621162,
CSISO4UNITEDKINGDOM, CSISO10SWEDISH, CSISO11SWEDISHFORNAMES,
CSISO14JISC6220RO, CSISO15ITALIAN, CSISO16PORTUGESE, CSISO17SPANISH,
CSISO18GREEK7OLD, CSISO19LATINGREEK, CSISO21GERMAN, CSISO25FRENCH,
CSISO27LATINGREEK1, CSISO49INIS, CSISO50INIS8, CSISO51INISCYRILLIC,
CSISO58GB1988, CSISO60DANISHNORWEGIAN, CSISO60NORWEGIAN1, CSISO61NORWEGIAN2,
CSISO69FRENCH, CSISO84PORTUGUESE2, CSISO85SPANISH2, CSISO86HUNGARIAN,
CSISO88GREEK7, CSISO89ASMO449, CSISO90, CSISO92JISC62991984B, CSISO99NAPLPS,
CSISO103T618BIT, CSISO111ECMACYRILLIC, CSISO121CANADIAN1, CSISO122CANADIAN2,
CSISO139CSN369103, CSISO141JUSIB1002, CSISO143IECP271, CSISO150,
CSISO150GREEKCCITT, CSISO151CUBA, CSISO153GOST1976874, CSISO646DANISH,
CSISO2022CN, CSISO2022JP, CSISO2022JP2, CSISO2022KR, CSISO2033,
CSISO5427CYRILLIC, CSISO5427CYRILLIC1981, CSISO5428GREEK, CSISO10367BOX,
CSISOLATIN1, CSISOLATIN2, CSISOLATIN3, CSISOLATIN4, CSISOLATIN5, CSISOLATIN6,
CSISOLATINARABIC, CSISOLATINCYRILLIC, CSISOLATINGREEK, CSISOLATINHEBREW,
CSKOI8R, CSKSC5636, CSMACINTOSH, CSNATSDANO, CSNATSSEFI, CSN_369103,
CSPC8CODEPAGE437, CSPC775BALTIC, CSPC850MULTILINGUAL, CSPC862LATINHEBREW,
CSPCP852, CSSHIFTJIS, CSUCS4, CSUNICODE, CSWINDOWS31J, CUBA, CWI-2, CWI,
CYRILLIC, DE, DEC-MCS, DEC, DECMCS, DIN_66003, DK, DS2089, DS_2089, E13B,
EBCDIC-AT-DE-A, EBCDIC-AT-DE, EBCDIC-BE, EBCDIC-BR, EBCDIC-CA-FR,
EBCDIC-CP-AR1, EBCDIC-CP-AR2, EBCDIC-CP-BE, EBCDIC-CP-CA, EBCDIC-CP-CH,
EBCDIC-CP-DK, EBCDIC-CP-ES, EBCDIC-CP-FI, EBCDIC-CP-FR, EBCDIC-CP-GB,
EBCDIC-CP-GR, EBCDIC-CP-HE, EBCDIC-CP-IS, EBCDIC-CP-IT, EBCDIC-CP-NL,
EBCDIC-CP-NO, EBCDIC-CP-ROECE, EBCDIC-CP-SE, EBCDIC-CP-TR, EBCDIC-CP-US,
EBCDIC-CP-WT, EBCDIC-CP-YU, EBCDIC-CYRILLIC, EBCDIC-DK-NO-A, EBCDIC-DK-NO,
EBCDIC-ES-A, EBCDIC-ES-S, EBCDIC-ES, EBCDIC-FI-SE-A, EBCDIC-FI-SE, EBCDIC-FR,
EBCDIC-GREEK, EBCDIC-INT, EBCDIC-INT1, EBCDIC-IS-FRISS, EBCDIC-IT,
EBCDIC-JP-E, EBCDIC-JP-KANA, EBCDIC-PT, EBCDIC-UK, EBCDIC-US, EBCDICATDE,
EBCDICATDEA, EBCDICCAFR, EBCDICDKNO, EBCDICDKNOA, EBCDICES, EBCDICESA,
EBCDICESS, EBCDICFISE, EBCDICFISEA, EBCDICFR, EBCDICISFRISS, EBCDICIT,
EBCDICPT, EBCDICUK, EBCDICUS, ECMA-114, ECMA-118, ECMA-128, ECMA-CYRILLIC,
ECMACYRILLIC, ELOT_928, ES, ES2, EUC-CN, EUC-JISX0213, EUC-JP-MS, EUC-JP,
EUC-KR, EUC-TW, EUCCN, EUCJP-MS, EUCJP-OPEN, EUCJP-WIN, EUCJP, EUCKR, EUCTW,
FI, FR, GB, GB2312, GB13000, GB18030, GBK, GB_1988-80, GB_198880,
GEORGIAN-ACADEMY, GEORGIAN-PS, GOST_19768-74, GOST_19768, GOST_1976874,
GREEK-CCITT, GREEK, GREEK7-OLD, GREEK7, GREEK7OLD, GREEK8, GREEKCCITT,
HEBREW, HP-GREEK8, HP-ROMAN8, HP-ROMAN9, HP-THAI8, HP-TURKISH8, HPGREEK8,
HPROMAN8, HPROMAN9, HPTHAI8, HPTURKISH8, HU, IBM-803, IBM-856, IBM-901,
IBM-902, IBM-921, IBM-922, IBM-930, IBM-932, IBM-933, IBM-935, IBM-937,
IBM-939, IBM-943, IBM-1008, IBM-1025, IBM-1046, IBM-1047, IBM-1097, IBM-1112,
IBM-1122, IBM-1123, IBM-1124, IBM-1129, IBM-1130, IBM-1132, IBM-1133,
IBM-1137, IBM-1140, IBM-1141, IBM-1142, IBM-1143, IBM-1144, IBM-1145,
IBM-1146, IBM-1147, IBM-1148, IBM-1149, IBM-1153, IBM-1154, IBM-1155,
IBM-1156, IBM-1157, IBM-1158, IBM-1160, IBM-1161, IBM-1162, IBM-1163,
IBM-1164, IBM-1166, IBM-1167, IBM-1364, IBM-1371, IBM-1388, IBM-1390,
IBM-1399, IBM-4517, IBM-4899, IBM-4909, IBM-4971, IBM-5347, IBM-9030,
IBM-9066, IBM-9448, IBM-12712, IBM-16804, IBM037, IBM038, IBM256, IBM273,
IBM274, IBM275, IBM277, IBM278, IBM280, IBM281, IBM284, IBM285, IBM290,
IBM297, IBM367, IBM420, IBM423, IBM424, IBM437, IBM500, IBM775, IBM803,
IBM813, IBM819, IBM848, IBM850, IBM851, IBM852, IBM855, IBM856, IBM857,
IBM860, IBM861, IBM862, IBM863, IBM864, IBM865, IBM866, IBM866NAV, IBM868,
IBM869, IBM870, IBM871, IBM874, IBM875, IBM880, IBM891, IBM901, IBM902,
IBM903, IBM904, IBM905, IBM912, IBM915, IBM916, IBM918, IBM920, IBM921,
IBM922, IBM930, IBM932, IBM933, IBM935, IBM937, IBM939, IBM943, IBM1004,
IBM1008, IBM1025, IBM1026, IBM1046, IBM1047, IBM1089, IBM1097, IBM1112,
IBM1122, IBM1123, IBM1124, IBM1129, IBM1130, IBM1132, IBM1133, IBM1137,
IBM1140, IBM1141, IBM1142, IBM1143, IBM1144, IBM1145, IBM1146, IBM1147,
IBM1148, IBM1149, IBM1153, IBM1154, IBM1155, IBM1156, IBM1157, IBM1158,
IBM1160, IBM1161, IBM1162, IBM1163, IBM1164, IBM1166, IBM1167, IBM1364,
IBM1371, IBM1388, IBM1390, IBM1399, IBM4517, IBM4899, IBM4909, IBM4971,
IBM5347, IBM9030, IBM9066, IBM9448, IBM12712, IBM16804, IEC_P27-1, IEC_P271,
INIS-8, INIS-CYRILLIC, INIS, INIS8, INISCYRILLIC, ISIRI-3342, ISIRI3342,
ISO-2022-CN-EXT, ISO-2022-CN, ISO-2022-JP-2, ISO-2022-JP-3, ISO-2022-JP,
ISO-2022-KR, ISO-8859-1, ISO-8859-2, ISO-8859-3, ISO-8859-4, ISO-8859-5,
ISO-8859-6, ISO-8859-7, ISO-8859-8, ISO-8859-9, ISO-8859-9E, ISO-8859-10,
ISO-8859-11, ISO-8859-13, ISO-8859-14, ISO-8859-15, ISO-8859-16, ISO-10646,
ISO-10646/UCS2, ISO-10646/UCS4, ISO-10646/UTF-8, ISO-10646/UTF8, ISO-CELTIC,
ISO-IR-4, ISO-IR-6, ISO-IR-8-1, ISO-IR-9-1, ISO-IR-10, ISO-IR-11, ISO-IR-14,
ISO-IR-15, ISO-IR-16, ISO-IR-17, ISO-IR-18, ISO-IR-19, ISO-IR-21, ISO-IR-25,
ISO-IR-27, ISO-IR-37, ISO-IR-49, ISO-IR-50, ISO-IR-51, ISO-IR-54, ISO-IR-55,
ISO-IR-57, ISO-IR-60, ISO-IR-61, ISO-IR-69, ISO-IR-84, ISO-IR-85, ISO-IR-86,
ISO-IR-88, ISO-IR-89, ISO-IR-90, ISO-IR-92, ISO-IR-98, ISO-IR-99, ISO-IR-100,
ISO-IR-101, ISO-IR-103, ISO-IR-109, ISO-IR-110, ISO-IR-111, ISO-IR-121,
ISO-IR-122, ISO-IR-126, ISO-IR-127, ISO-IR-138, ISO-IR-139, ISO-IR-141,
ISO-IR-143, ISO-IR-144, ISO-IR-148, ISO-IR-150, ISO-IR-151, ISO-IR-153,
ISO-IR-155, ISO-IR-156, ISO-IR-157, ISO-IR-166, ISO-IR-179, ISO-IR-193,
ISO-IR-197, ISO-IR-199, ISO-IR-203, ISO-IR-209, ISO-IR-226, ISO/TR_11548-1,
ISO646-CA, ISO646-CA2, ISO646-CN, ISO646-CU, ISO646-DE, ISO646-DK, ISO646-ES,
ISO646-ES2, ISO646-FI, ISO646-FR, ISO646-FR1, ISO646-GB, ISO646-HU,
ISO646-IT, ISO646-JP-OCR-B, ISO646-JP, ISO646-KR, ISO646-NO, ISO646-NO2,
ISO646-PT, ISO646-PT2, ISO646-SE, ISO646-SE2, ISO646-US, ISO646-YU,
ISO2022CN, ISO2022CNEXT, ISO2022JP, ISO2022JP2, ISO2022KR, ISO6937,
ISO8859-1, ISO8859-2, ISO8859-3, ISO8859-4, ISO8859-5, ISO8859-6, ISO8859-7,
ISO8859-8, ISO8859-9, ISO8859-9E, ISO8859-10, ISO8859-11, ISO8859-13,
ISO8859-14, ISO8859-15, ISO8859-16, ISO11548-1, ISO88591, ISO88592, ISO88593,
ISO88594, ISO88595, ISO88596, ISO88597, ISO88598, ISO88599, ISO88599E,
ISO885910, ISO885911, ISO885913, ISO885914, ISO885915, ISO885916,
ISO_646.IRV:1991, ISO_2033-1983, ISO_2033, ISO_5427-EXT, ISO_5427,
ISO_5427:1981, ISO_5427EXT, ISO_5428, ISO_5428:1980, ISO_6937-2,
ISO_6937-2:1983, ISO_6937, ISO_6937:1992, ISO_8859-1, ISO_8859-1:1987,
ISO_8859-2, ISO_8859-2:1987, ISO_8859-3, ISO_8859-3:1988, ISO_8859-4,
ISO_8859-4:1988, ISO_8859-5, ISO_8859-5:1988, ISO_8859-6, ISO_8859-6:1987,
ISO_8859-7, ISO_8859-7:1987, ISO_8859-7:2003, ISO_8859-8, ISO_8859-8:1988,
ISO_8859-9, ISO_8859-9:1989, ISO_8859-9E, ISO_8859-10, ISO_8859-10:1992,
ISO_8859-14, ISO_8859-14:1998, ISO_8859-15, ISO_8859-15:1998, ISO_8859-16,
ISO_8859-16:2001, ISO_9036, ISO_10367-BOX, ISO_10367BOX, ISO_11548-1,
ISO_69372, IT, JIS_C6220-1969-RO, JIS_C6229-1984-B, JIS_C62201969RO,
JIS_C62291984B, JOHAB, JP-OCR-B, JP, JS, JUS_I.B1.002, KOI-7, KOI-8, KOI8-R,
KOI8-RU, KOI8-T, KOI8-U, KOI8, KOI8R, KOI8U, KSC5636, L1, L2, L3, L4, L5, L6,
L7, L8, L10, LATIN-9, LATIN-GREEK-1, LATIN-GREEK, LATIN1, LATIN2, LATIN3,
LATIN4, LATIN5, LATIN6, LATIN7, LATIN8, LATIN9, LATIN10, LATINGREEK,
LATINGREEK1, MAC-CENTRALEUROPE, MAC-CYRILLIC, MAC-IS, MAC-SAMI, MAC-UK, MAC,
MACCYRILLIC, MACINTOSH, MACIS, MACUK, MACUKRAINIAN, MIK, MS-ANSI, MS-ARAB,
MS-CYRL, MS-EE, MS-GREEK, MS-HEBR, MS-MAC-CYRILLIC, MS-TURK, MS932, MS936,
MSCP949, MSCP1361, MSMACCYRILLIC, MSZ_7795.3, MS_KANJI, NAPLPS, NATS-DANO,
NATS-SEFI, NATSDANO, NATSSEFI, NC_NC0010, NC_NC00-10, NC_NC00-10:81,
NF_Z_62-010, NF_Z_62-010_(1973), NF_Z_62-010_1973, NF_Z_62010,
NF_Z_62010_1973, NO, NO2, NS_4551-1, NS_4551-2, NS_45511, NS_45512,
OS2LATIN1, OSF00010001, OSF00010002, OSF00010003, OSF00010004, OSF00010005,
OSF00010006, OSF00010007, OSF00010008, OSF00010009, OSF0001000A, OSF00010020,
OSF00010100, OSF00010101, OSF00010102, OSF00010104, OSF00010105, OSF00010106,
OSF00030010, OSF0004000A, OSF0005000A, OSF05010001, OSF100201A4, OSF100201A8,
OSF100201B5, OSF100201F4, OSF100203B5, OSF1002011C, OSF1002011D, OSF1002035D,
OSF1002035E, OSF1002035F, OSF1002036B, OSF1002037B, OSF10010001, OSF10010004,
OSF10010006, OSF10020025, OSF10020111, OSF10020115, OSF10020116, OSF10020118,
OSF10020122, OSF10020129, OSF10020352, OSF10020354, OSF10020357, OSF10020359,
OSF10020360, OSF10020364, OSF10020365, OSF10020366, OSF10020367, OSF10020370,
OSF10020387, OSF10020388, OSF10020396, OSF10020402, OSF10020417, PT, PT2,
PT154, R8, R9, RK1048, ROMAN8, ROMAN9, RUSCII, SE, SE2, SEN_850200_B,
SEN_850200_C, SHIFT-JIS, SHIFT_JIS, SHIFT_JISX0213, SJIS-OPEN, SJIS-WIN,
SJIS, SS636127, STRK1048-2002, ST_SEV_358-88, T.61-8BIT, T.61, T.618BIT,
TCVN-5712, TCVN, TCVN5712-1, TCVN5712-1:1993, THAI8, TIS-620, TIS620-0,
TIS620.2529-1, TIS620.2533-0, TIS620, TS-5881, TSCII, TURKISH8, UCS-2,
UCS-2BE, UCS-2LE, UCS-4, UCS-4BE, UCS-4LE, UCS2, UCS4, UHC, UJIS, UK,
UNICODE, UNICODEBIG, UNICODELITTLE, US-ASCII, US, UTF-7, UTF-8, UTF-16,
UTF-16BE, UTF-16LE, UTF-32, UTF-32BE, UTF-32LE, UTF7, UTF8, UTF16, UTF16BE,
UTF16LE, UTF32, UTF32BE, UTF32LE, VISCII, WCHAR_T, WIN-SAMI-2, WINBALTRIM,
WINDOWS-31J, WINDOWS-874, WINDOWS-936, WINDOWS-1250, WINDOWS-1251,
WINDOWS-1252, WINDOWS-1253, WINDOWS-1254, WINDOWS-1255, WINDOWS-1256,
WINDOWS-1257, WINDOWS-1258, WINSAMI2, WS2, YU
""" | coodoing/piconv | support_encodings.py | Python | apache-2.0 | 13,095 | 0.002596 |
# module includes
import elliptic
import heat
import IRT
print "Loading comatmor version 0.0.1"
| fameyer/comatmor | src/comatmor/__init__.py | Python | gpl-2.0 | 96 | 0 |
from layers.receivers.base_receiever import BaseReceiver
class ReceiptReceiver(BaseReceiver):
def onReceipt(self, receiptEntity):
ack = ReceiptReceiver.getAckEntity(receiptEntity)
self.toLower(ack)
| hyades/whatsapp-client | src/layers/receivers/receipt_receiver.py | Python | gpl-3.0 | 221 | 0 |
from rest_framework.serializers import ModelSerializer
from app.schedule.models.patient import Patient
from app.schedule.serializers.clinic import ClinicListSerializer
from app.schedule.serializers.dental_plan import DentalPlanSerializer
class PatientSerializer(ModelSerializer):
class Meta:
model = Patient
fields = ('id', 'name', 'last_name', 'sex', 'phone', 'clinic', 'created', 'modified', 'dental_plan')
class PatientListSerializer(PatientSerializer):
clinic = ClinicListSerializer()
dental_plan = DentalPlanSerializer()
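# Hedged usage sketch (added; not part of the original module), assuming
# standard Django REST Framework usage of these serializers:
#
#   patients = Patient.objects.all()
#   data = PatientListSerializer(patients, many=True).data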
| agendaodonto/server | app/schedule/serializers/patient.py | Python | agpl-3.0 | 559 | 0.001789 |
# -- coding: utf-8 --
from flask import render_template, session, redirect, url_for, current_app, request
from .. import db
from ..models import Detail,Contents,Keywords,WXUrls
from . import main
from .forms import NameForm
import wechatsogou
import hashlib
from .errors import *
from ..API.reqweb import *
@main.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
session['known'] = False
if current_app.config['FLASKY_ADMIN']:
send_email(current_app.config['FLASKY_ADMIN'], 'New User',
'mail/new_user', user=user)
else:
session['known'] = True
session['name'] = form.name.data
return redirect(url_for('.index'))
return render_template('index.html',
form=form, name=session.get('name'),
known=session.get('known', False))
@main.route('/test/')
def test():
content = Contents(name="test内容");
todo1 = Detail(title='teest title', keywords='列表列表列表',description='描述描述描述描述描述',contents=content)
todo1.save()
ss = Detail.objects().all()
objLen = len(ss)
s1 = ss[0]
a = 4
#todo1.save()
return render_template('detail.html',detail = s1)
@main.route('/content/',methods=['GET', 'POST'])
def content():
keyword=request.args.get('key')
vx_obj = wechatsogou.WechatSogouAPI()
lists = []
sugg_keywords = []
md5_string = ''
keywords = ''
title = ''
des = ''
#try:
if keyword.strip() != '':
lists = vx_obj.search_article(keyword)
for list in lists:
wx_url = list['article']['url']
hash = hashlib.md5()
hash.update(bytes(wx_url))
md5_str = hash.hexdigest()
#list['article'].append('wx_url_md5')
list['article']['wx_url_md5']=md5_str
wx_urls = WXUrls(md5_str = md5_str,wx_url=wx_url)
wx_urls.save()
sugg_keywords = vx_obj.get_sugg(keyword)
#except:
# print('value errot')
key_count = len(sugg_keywords)
if key_count == 1:
title = keywords= sugg_keywords[0]
elif key_count > 1:
title = keyword+'_'+sugg_keywords[0]
for sugg_key in sugg_keywords:
keywords = keywords+ ','+sugg_key
keywords = keywords[1:]
else:
title =keywords= keyword
if title.strip() != '':
        hash = hashlib.md5()  # md5 object; md5 cannot be reversed, but the mapping is fixed (one-to-one), so it is weak and can be attacked with collisions
        hash.update(bytes(title))  # pass in the string that should be hashed
        md5_string = hash.hexdigest()  # get the hex digest string
keywrods_id = Keywords(md5_string = md5_string,title=keyword)
keywrods_id.save()
else:
print '404.html'
return render_template('content.html',content_list = lists,title=title,keywords=keywords,des=des,sugg_keywords=sugg_keywords)
@main.route('/post/',methods=['GET', 'POST'])
def post():
url_md5=request.args.get('md5')
wx_urls = WXUrls.objects(md5_str=url_md5)[:1]
if wx_urls.count() == 1:
wx_url=wx_urls[0].wx_url
ReqWebInfo.get_wx_article_info(wx_url)
return render_template('detail.html',)
else:
return render_template('404.html')
| Rcoko/flaskLearn | app/main/views.py | Python | mit | 3,576 | 0.014646 |
import argparse
from collections import defaultdict, Counter, deque
import random
import json
import time
from tqdm import tqdm
import wikipedia
class MarkovModel(object):
def __init__(self):
self.states = defaultdict(lambda: Counter())
self.totals = Counter()
def add_sample(self, state, followup):
self.states[state][followup] += 1
self.totals[state] += 1
def generate(self):
result = []
for followup in self.iter_chain():
result.append(followup)
return result
def iter_chain(self, state=tuple()):
while state in self.states:
followup = self.next(state)
state = state[1:] + followup
for token in followup:
yield token
def next(self, state):
r = random.randint(0, self.totals[state] - 1)
for followup, weight in self.states[state].items():
r -= weight
if r < 0:
return followup
raise ValueError("Mismatch of totals / weights for state {}".format(state))
def to_json(self):
converted = {' '.join(state): list(followups.keys()) for state, followups in self.states.items()}
return json.dumps(converted)
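# Hedged usage sketch (added as a comment; not part of the original module).
# Feeding the transitions produced by iter_states() below into add_sample()
# builds the chain; generate() then walks it from the empty start state until
# it reaches a state with no recorded followups.
#
#   model = MarkovModel()
#   for state, followup in iter_states(['a', 'b', 'c'], 2, end_marker=()):
#       model.add_sample(state, followup)
#   model.generate()   # -> ['a', 'b', 'c'] for this single-sample corpus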
def iter_states(tokens, state_size, start_state=tuple(), end_marker=None):
# First transition is from empty state to first token-based state
yield start_state, tuple(tokens[0:state_size])
state = tuple(tokens[0:state_size])
for token in tokens[state_size:]:
# Each additional token means last state to that token
yield state, (token,)
# New state is last {state_size} tokens we yielded
state = state[1:] + (token,)
# End is marked by None
yield state, end_marker
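# Illustrative trace (added comment; not in the original): with state_size=2
# and tokens ['a', 'b', 'c'], iter_states yields
#   ((),         ('a', 'b'))   # start_state -> first state_size tokens
#   (('a', 'b'), ('c',))       # every later token follows the previous window
#   (('b', 'c'), None)         # the default end_marker closes the chain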
def tokenize_story(story):
story = deque(story)
yield "\n"
while len(story) > 0:
token = eat_one_token(story)
if token is not None:
yield token
def eat_one_token(story):
while len(story) > 0 and isinvalid(story[0]):
story.popleft()
if len(story) == 0:
return None
if isalnum(story[0]):
return eat_word(story)
if ispunctuation(story[0]):
return eat_punctuation(story)
if isnewline(story[0]):
return eat_newline(story)
def isinvalid(char):
return not isalnum(char) and not ispunctuation(char) and not isnewline(char)
def isalnum(char):
return char.isalnum() or char == "'" or char == "’"
def ispunctuation(char):
return char in ",.-!?:&"
def isnewline(char):
return char == '\n'
def eat_word(story):
word = [story.popleft()]
while len(story) > 0 and isalnum(story[0]):
word.append(story.popleft())
return ''.join(word)
def eat_punctuation(story):
token = [story.popleft()]
while len(story) > 0 and ispunctuation(story[0]):
token.append(story.popleft())
return ''.join(token)
def eat_newline(story):
while len(story) > 0 and story[0].isspace():
story.popleft()
return '\n'
def load_story(filenames):
stories = []
for filename in filenames:
with open(filename) as fp:
story = fp.read()
if filename.endswith('.ftxt'):
story = remove_single_newlines(story)
stories.append(story)
return '\n'.join(stories)
def remove_single_newlines(story):
paragraphs = [[]]
for line in story.splitlines():
if len(line.strip()) == 0:
paragraphs.append([])
else:
paragraphs[-1].append(line)
return '\n'.join(' '.join(x for x in p) for p in paragraphs)
def load_wikipedia(num_articles):
lines = []
while num_articles > 0:
chunk = min(10, num_articles)
num_articles -= 10
for article in wikipedia.random(chunk):
try:
page = wikipedia.page(article)
except wikipedia.DisambiguationError as ex:
page = wikipedia.page(ex.args[1][0])
print(article)
lines.extend(x for x in page.content.splitlines() if not x.startswith('==') and len(x) > 0)
return '\n'.join(lines)
def main(args):
model = MarkovModel()
if args.mode == 'txt':
story = load_story(args.txt)
elif args.mode == 'wikipedia':
story = load_wikipedia(100)
else:
raise ValueError("invalid mode {}".format(args.mode))
tokens = list(tqdm(tokenize_story(story), desc="tokenizing"))
for state, followup in tqdm(iter_states(tokens, 3, start_state=tuple('\n'), end_marker=()), desc="building model"):
model.add_sample(state, followup)
print("Saving Model...")
with open("model.json", "w") as fp:
fp.write(model.to_json())
print("Generating Story:")
for token in model.iter_chain(tuple('\n')):
if not ispunctuation(token):
print(" ", end="")
print(token, end="", flush=True)
time.sleep(0.05)
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('mode', choices=['txt', 'wikipedia'])
ap.add_argument('--txt', action='append')
return ap.parse_args()
if __name__ == '__main__':
main(parse_args())
| bschug/neverending-story | markov.py | Python | mit | 5,169 | 0.001161 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
import decimal_precision as dp
import netsvc
import pooler
from osv import fields, osv, orm
from tools.translate import _
import logging
class account_invoice(osv.osv):
_name = 'account.invoice'
_inherit = 'account.invoice'
_description = 'Account Invoice Debit Note'
_columns = {
'type': fields.selection([
('out_invoice', 'Customer Invoice'),
('in_invoice', 'Supplier Invoice'),
('out_refund', 'Customer Refund'),
('in_refund', 'Supplier Refund'),
('in_debit', 'Supplier Debit Note'), # Added
('out_debit', 'Client Debit Note'), # Added
], 'Type', readonly=True, select=True), # Modified
}
# Modified
def _get_analytic_lines(self, cr, uid, id,context=None):
if context is None:
context = {}
inv = self.browse(cr, uid, id)
cur_obj = self.pool.get('res.currency')
company_currency = inv.company_id.currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = self.pool.get('account.invoice.line').move_line_get(cr, uid, inv.id,context=context)
for il in iml:
if il['account_analytic_id']:
if inv.type in ('in_invoice', 'in_refund', 'in_debit'): # Modified
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
if not inv.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (inv.journal_id.name,))
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': inv['date_invoice'],
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context={'date': inv.date_invoice}) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': inv.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
# Modified
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
type_inv = context.get('type', 'out_invoice')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
type2journal = {'out_invoice': 'sale', 'out_debit': 'sale', 'in_invoice': 'purchase', 'in_debit': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund'} # Modified
refund_journal = {'out_invoice': False, 'out_debit': False, 'in_invoice': False, 'in_debit': False, 'out_refund': True, 'in_refund': True} # Modified
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', type2journal.get(type_inv, 'sale')),
('company_id', '=', company_id)],
# ('refund_journal', '=', refund_journal.get(type_inv, False))],
limit=1)
return res and res[0] or False # Modified
# Modified
def _get_journal_analytic(self, cr, uid, type_inv, context=None):
type2journal = {'out_invoice': 'sale', 'out_debit': 'sale', 'in_invoice': 'purchase', 'in_debit': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'} # Modified
tt = type2journal.get(type_inv, 'sale')
result = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=',tt)], context=context)
if not result:
raise osv.except_osv(_('No Analytic Journal !'),_("You must define an analytic journal of type '%s' !") % (tt,))
return result and result[0] or False # Modified
# Modified
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
invoice_addr_id = False
contact_addr_id = False
partner_payment_term = False
acc_id = False
bank_id = False
fiscal_position = False
opt = [('uid', str(uid))]
if partner_id:
opt.insert(0, ('id', partner_id))
res = self.pool.get('res.partner').address_get(cr, uid, [partner_id], ['contact', 'invoice'])
contact_addr_id = res['contact']
invoice_addr_id = res['invoice']
p = self.pool.get('res.partner').browse(cr, uid, partner_id)
if company_id:
if not p.property_account_receivable or not p.property_account_payable:
raise osv.except_osv(_('Error!'),
                        _('You need to define your account plan for your company'))
if p.property_account_receivable.company_id.id != company_id and p.property_account_payable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error !'),
_('Can not find account chart for this company, Please Create account.'))
account_obj = self.pool.get('account.account')
rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id])
pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id])
p.property_account_receivable = rec_obj_acc[0]
p.property_account_payable = pay_obj_acc[0]
if type in ('out_invoice', 'out_refund', 'out_debit'): # Modified
acc_id = p.property_account_receivable.id
else:
acc_id = p.property_account_payable.id
fiscal_position = p.property_account_position and p.property_account_position.id or False
partner_payment_term = p.property_payment_term and p.property_payment_term.id or False
if p.bank_ids:
bank_id = p.bank_ids[0].id
result = {'value': {
'address_contact_id': contact_addr_id,
'address_invoice_id': invoice_addr_id,
'account_id': acc_id,
'payment_term': partner_payment_term,
'fiscal_position': fiscal_position
}
}
if type in ('in_invoice', 'in_refund', 'in_debit'): # Modified
result['value']['partner_bank_id'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr, uid, ids, partner_payment_term, date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
# Modified
def onchange_company_id(self, cr, uid, ids, company_id, part_id, type, invoice_line, currency_id):
val = {}
dom = {}
obj_journal = self.pool.get('account.journal')
account_obj = self.pool.get('account.account')
inv_line_obj = self.pool.get('account.invoice.line')
if company_id and part_id and type:
acc_id = False
partner_obj = self.pool.get('res.partner').browse(cr,uid,part_id)
if partner_obj.property_account_payable and partner_obj.property_account_receivable:
if partner_obj.property_account_payable.company_id.id != company_id and partner_obj.property_account_receivable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr, uid, rec_pro_id, ['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr, uid, pay_pro_id, ['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error !'),
_('Can not find account chart for this company, Please Create account.'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_res_id
else:
acc_id = pay_res_id
val= {'account_id': acc_id}
if ids:
if company_id:
inv_obj = self.browse(cr,uid,ids)
for line in inv_obj[0].invoice_line:
if line.account_id:
if line.account_id.company_id.id != company_id:
result_id = account_obj.search(cr, uid, [('name','=',line.account_id.name),('company_id','=',company_id)])
if not result_id:
raise osv.except_osv(_('Configuration Error !'),
_('Can not find account chart for this company in invoice line account, Please Create account.'))
# inv_line_obj.write(cr, uid, [line.id], {'account_id': result_id[0]}) SIL
inv_line_obj.write(cr, uid, [line.id], {'account_id': result_id[-1]})
else:
if invoice_line:
for inv_line in invoice_line:
obj_l = account_obj.browse(cr, uid, inv_line[2]['account_id'])
if obj_l.company_id.id != company_id:
raise osv.except_osv(_('Configuration Error !'),
_('Invoice line account company does not match with invoice company.'))
else:
continue
if company_id and type:
if type in ('out_invoice', 'out_debit'): # Modified
journal_type = 'sale'
elif type in ('out_refund'):
journal_type = 'sale_refund'
elif type in ('in_refund', 'in_debit'): # Modified
journal_type = 'purchase_refund'
else:
journal_type = 'purchase'
journal_ids = obj_journal.search(cr, uid, [('company_id','=',company_id), ('type', '=', journal_type)])
if journal_ids:
val['journal_id'] = journal_ids[0]
res_journal_default = self.pool.get('ir.values').get(cr, uid, 'default', 'type=%s' % (type), ['account.invoice'])
for r in res_journal_default:
if r[1] == 'journal_id' and r[2] in journal_ids:
val['journal_id'] = r[2]
if not val.get('journal_id', False):
raise osv.except_osv(_('Configuration Error !'), (_('Can\'t find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Financial Accounting\Accounts\Journals.') % (journal_type)))
dom = {'journal_id': [('id', 'in', journal_ids)]}
else:
journal_ids = obj_journal.search(cr, uid, [])
if currency_id and company_id:
currency = self.pool.get('res.currency').browse(cr, uid, currency_id)
if currency.company_id and currency.company_id.id != company_id:
val['currency_id'] = False
else:
val['currency_id'] = currency.id
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id)
if company.currency_id.company_id and company.currency_id.company_id.id != company_id:
val['currency_id'] = False
else:
val['currency_id'] = company.currency_id.id
return {'value': val, 'domain': dom}
# Modified
def action_move_create(self, cr, uid, ids,context=None):
"""Creates invoice related analytics and financial move lines"""
ait_obj = self.pool.get('account.invoice.tax')
cur_obj = self.pool.get('res.currency')
period_obj = self.pool.get('account.period')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
if context is None:
context = {}
for inv in self.browse(cr, uid, ids,context=context):
if not inv.journal_id.sequence_id:
raise osv.except_osv(_('Error !'), _('Please define sequence on invoice journal'))
if not inv.invoice_line:
raise osv.except_osv(_('No Invoice Lines !'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = context.copy()
ctx.update({'lang': inv.partner_id.lang})
if not inv.date_invoice:
self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)
company_currency = inv.company_id.currency_id.id
# create the analytical lines
# one move line per invoice line
iml = self._get_analytic_lines(cr, uid, inv.id,context=ctx)
# check if taxes are all computed
ctx = context.copy()
ctx.update({'lang': inv.partner_id.lang})
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)
self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)
# SIL if inv.type in ('in_invoice', 'in_refund', 'in_debit') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0): # Modified
# raise osv.except_osv(_('Bad total !'), _('Please verify the price of the invoice !\nThe real total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise osv.except_osv(_('Error !'), _("Cannot create the invoice !\nThe payment term defined gives a computed amount greater than the total invoiced amount."))
# one move line per tax line
iml += ait_obj.move_line_get(cr, uid, inv.id)
entry_type = ''
if inv.type in ('in_invoice', 'in_refund', 'in_debit'): # Modified
ref = inv.reference
entry_type = 'journal_pur_voucher'
if inv.type == 'in_refund':
entry_type = 'cont_voucher'
else:
ref = self._convert_ref(cr, uid, inv.number)
entry_type = 'journal_sale_vou'
if inv.type == 'out_refund':
entry_type = 'cont_voucher'
diff_currency_p = inv.currency_id.id <> company_currency
# create one move line for the total and possibly adjust the other lines amount
total = 0
total_currency = 0
total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml)
acc_id = inv.account_id.id
name = inv['name'] or '/'
totlines = False
if inv.payment_term:
totlines = payment_term_obj.compute(cr,uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)
if totlines:
res_amount_currency = total_currency
i = 0
ctx.update({'date': inv.date_invoice})
for t in totlines:
if inv.currency_id.id != company_currency:
amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)
else:
amount_currency = False
# last line add the diff
res_amount_currency -= amount_currency or 0
i += 1
if i == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': acc_id,
'date_maturity': t[0],
'amount_currency': diff_currency_p \
and amount_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': acc_id,
'date_maturity': inv.date_due or False,
'amount_currency': diff_currency_p \
and total_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref
})
date = inv.date_invoice or time.strftime('%Y-%m-%d')
part = inv.partner_id.id
line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part, date, context=ctx)),iml)
line = self.group_lines(cr, uid, iml, line, inv)
journal_id = inv.journal_id.id
journal = self.pool.get('account.journal').browse(cr, uid, journal_id,context=ctx)
if journal.centralisation:
raise osv.except_osv(_('UserError'),
_('Cannot create invoice move on centralised journal'))
line = self.finalize_invoice_move_lines(cr, uid, inv, line)
move = {
'ref': inv.reference and inv.reference or inv.name,
'line_id': line,
'journal_id': journal_id,
'date': date,
'type': entry_type,
'narration':inv.comment
}
period_id = inv.period_id and inv.period_id.id or False
if not period_id:
period_ids = self.pool.get('account.period').search(cr, uid, [('date_start','<=',inv.date_invoice or time.strftime('%Y-%m-%d')),('date_stop','>=',inv.date_invoice or time.strftime('%Y-%m-%d')), ('company_id', '=', inv.company_id.id)])
if period_ids:
period_id = period_ids[0]
if period_id:
move['period_id'] = period_id
for i in line:
i[2]['period_id'] = period_id
move_id = move_obj.create(cr, uid, move, context=ctx)
new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name
# make the invoice point to that move
self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
ctx.update({'invoice':inv})
move_obj.post(cr, uid, [move_id], context=ctx)
self._log_event(cr, uid, ids)
return True
# Modified
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']:
partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0]
if not view_type:
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])
view_type = 'tree'
if view_type == 'form':
if partner['supplier'] and not partner['customer']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])
else:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])
if view_id and isinstance(view_id, (list, tuple)):
view_id = view_id[0]
res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('journal_type', 'sale')
for field in res['fields']:
if field == 'journal_id':
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1)
res['fields'][field]['selection'] = journal_select
if view_type == 'tree':
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='partner_id']")
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund', 'in_debit'): # Modified
partner_string = _('Supplier')
for node in nodes:
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
# Modified
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
try:
res = super(account_invoice, self).create(cr, uid, vals, context)
for inv_id, name in self.name_get(cr, uid, [res], context=context):
ctx = context.copy()
if vals.get('type', 'in_invoice') in ('out_invoice', 'out_refund', 'out_debit'): # Modified
ctx = self.get_log_context(cr, uid, context=ctx)
message = _("Invoice '%s' is waiting for validation.") % name
self.log(cr, uid, inv_id, message, context=ctx)
return res
except Exception, e:
if '"journal_id" viol' in e.args[0]:
raise orm.except_orm(_('Configuration Error!'),
_('There is no Accounting Journal of type Sale/Purchase defined!'))
else:
raise orm.except_orm(_('Unknown Error'), str(e))
# Modified
def compute_invoice_totals(self, cr, uid, inv, company_currency, ref, invoice_move_lines):
total = 0
total_currency = 0
cur_obj = self.pool.get('res.currency')
for i in invoice_move_lines:
if inv.currency_id.id != company_currency:
i['currency_id'] = inv.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, inv.currency_id.id,
company_currency, i['price'],
context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')})
else:
i['amount_currency'] = False
i['currency_id'] = False
i['ref'] = ref
if inv.type in ('out_invoice','in_refund', 'out_debit'): # Modified
total += i['price']
total_currency += i['amount_currency'] or i['price']
i['price'] = - i['price']
else:
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, invoice_move_lines
# Modified
def action_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
        #TODO: not a correct fix, but fresh values are required before reading them.
self.write(cr, uid, ids, {})
for obj_inv in self.browse(cr, uid, ids, context=context):
id = obj_inv.id
invtype = obj_inv.type
number = obj_inv.number
move_id = obj_inv.move_id and obj_inv.move_id.id or False
reference = obj_inv.reference or ''
self.write(cr, uid, ids, {'internal_number':number})
if invtype in ('in_invoice', 'in_refund', 'in_debit'): # Modified
if not reference:
ref = self._convert_ref(cr, uid, number)
else:
ref = reference
else:
ref = self._convert_ref(cr, uid, number)
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
for inv_id, name in self.name_get(cr, uid, [id]):
ctx = context.copy()
if obj_inv.type in ('out_invoice', 'out_refund', 'out_debit'): # Modified
ctx = self.get_log_context(cr, uid, context=ctx)
message = _('Invoice ') + " '" + name + "' "+ _("is validated.")
self.log(cr, uid, inv_id, message, context=ctx)
return True
# Modified
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
types = {
'out_invoice': 'CI: ',
'in_invoice': 'SI: ',
'out_refund': 'OR: ',
'in_refund': 'SR: ',
            'out_debit': 'CD: ', # Added
            'in_debit': 'SD: ', # Added
}
return [(r['id'], (r['number']) or types[r['type']] + (r['name'] or '')) for r in self.read(cr, uid, ids, ['type', 'number', 'name'], context, load='_classic_write')]
def invoice_pay_customer(self, cr, uid, ids, context=None):
if not ids:
return []
inv = self.browse(cr, uid, ids[0], context=context)
return {
'name':_("Pay Invoice"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'account.voucher',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'domain': '[]',
'context': {
'default_partner_id': inv.partner_id.id,
'default_amount': inv.residual,
'default_name':inv.name,
'close_after_process': True,
'invoice_type':inv.type,
'invoice_id':inv.id,
'default_type': inv.type in ('out_invoice','out_refund','out_debit') and 'receipt' or 'payment', # Added
'type': inv.type in ('out_invoice','out_refund','out_debit') and 'receipt' or 'payment' # Added
}
}
account_invoice()
| pronexo-odoo/odoo-argentina | l10n_ar_account_check_debit_note/invoice.py | Python | agpl-3.0 | 31,158 | 0.008634 |
from __future__ import absolute_import
import filecmp
import os
import sys
import llvmbuild.componentinfo as componentinfo
from llvmbuild.util import fatal, note
###
def cmake_quote_string(value):
"""
cmake_quote_string(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# Currently, we only handle escaping backslashes.
value = value.replace("\\", "\\\\")
return value
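# For instance (illustrative, not from the original file):
# cmake_quote_string(r'C:\tools') returns r'C:\\tools', i.e. each backslash is
# doubled so that CMake reads it back as a single one.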
def cmake_quote_path(value):
"""
cmake_quote_path(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
    # CMake has a bug in its Makefile generator that doesn't properly quote
# strings it generates. So instead of using proper quoting, we just use "/"
# style paths. Currently, we only handle escaping backslashes.
value = value.replace("\\", "/")
return value
def make_install_dir(path):
"""
make_install_dir(path) -> None
Create the given directory path for installation, including any parents.
"""
# os.makedirs considers it an error to be called with an existent path.
if not os.path.exists(path):
os.makedirs(path)
###
class LLVMProjectInfo(object):
@staticmethod
def load_infos_from_path(llvmbuild_source_root):
def recurse(subpath):
# Load the LLVMBuild file.
llvmbuild_path = os.path.join(llvmbuild_source_root + subpath,
'LLVMBuild.txt')
if not os.path.exists(llvmbuild_path):
fatal("missing LLVMBuild.txt file at: %r" % (llvmbuild_path,))
# Parse the components from it.
common,info_iter = componentinfo.load_from_path(llvmbuild_path,
subpath)
for info in info_iter:
yield info
# Recurse into the specified subdirectories.
for subdir in common.get_list("subdirectories"):
for item in recurse(os.path.join(subpath, subdir)):
yield item
return recurse("/")
@staticmethod
def load_from_path(source_root, llvmbuild_source_root):
infos = list(
LLVMProjectInfo.load_infos_from_path(llvmbuild_source_root))
return LLVMProjectInfo(source_root, infos)
def __init__(self, source_root, component_infos):
# Store our simple ivars.
self.source_root = source_root
self.component_infos = list(component_infos)
self.component_info_map = None
self.ordered_component_infos = None
def validate_components(self):
"""validate_components() -> None
Validate that the project components are well-defined. Among other
things, this checks that:
- Components have valid references.
- Components references do not form cycles.
We also construct the map from component names to info, and the
topological ordering of components.
"""
# Create the component info map and validate that component names are
# unique.
self.component_info_map = {}
for ci in self.component_infos:
existing = self.component_info_map.get(ci.name)
if existing is not None:
# We found a duplicate component name, report it and error out.
fatal("found duplicate component %r (at %r and %r)" % (
ci.name, ci.subpath, existing.subpath))
self.component_info_map[ci.name] = ci
# Disallow 'all' as a component name, which is a special case.
if 'all' in self.component_info_map:
fatal("project is not allowed to define 'all' component")
# Add the root component.
if '$ROOT' in self.component_info_map:
fatal("project is not allowed to define $ROOT component")
self.component_info_map['$ROOT'] = componentinfo.GroupComponentInfo(
'/', '$ROOT', None)
self.component_infos.append(self.component_info_map['$ROOT'])
# Topologically order the component information according to their
# component references.
def visit_component_info(ci, current_stack, current_set):
# Check for a cycles.
if ci in current_set:
# We found a cycle, report it and error out.
cycle_description = ' -> '.join(
'%r (%s)' % (ci.name, relation)
for relation,ci in current_stack)
fatal("found cycle to %r after following: %s -> %s" % (
ci.name, cycle_description, ci.name))
# If we have already visited this item, we are done.
if ci not in components_to_visit:
return
# Otherwise, mark the component info as visited and traverse.
components_to_visit.remove(ci)
# Validate the parent reference, which we treat specially.
if ci.parent is not None:
parent = self.component_info_map.get(ci.parent)
if parent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, ci.parent, 'parent'))
ci.set_parent_instance(parent)
for relation,referent_name in ci.get_component_references():
# Validate that the reference is ok.
referent = self.component_info_map.get(referent_name)
if referent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, referent_name, relation))
# Visit the reference.
current_stack.append((relation,ci))
current_set.add(ci)
visit_component_info(referent, current_stack, current_set)
current_set.remove(ci)
current_stack.pop()
# Finally, add the component info to the ordered list.
self.ordered_component_infos.append(ci)
# FIXME: We aren't actually correctly checking for cycles along the
# parent edges. Haven't decided how I want to handle this -- I thought
# about only checking cycles by relation type. If we do that, it falls
# out easily. If we don't, we should special case the check.
self.ordered_component_infos = []
components_to_visit = sorted(
set(self.component_infos),
key = lambda c: c.name)
while components_to_visit:
visit_component_info(components_to_visit[0], [], set())
# Canonicalize children lists.
for c in self.ordered_component_infos:
c.children.sort(key = lambda c: c.name)
def print_tree(self):
def visit(node, depth = 0):
print('%s%-40s (%s)' % (' '*depth, node.name, node.type_name))
for c in node.children:
visit(c, depth + 1)
visit(self.component_info_map['$ROOT'])
def write_components(self, output_path):
# Organize all the components by the directory their LLVMBuild file
# should go in.
info_basedir = {}
for ci in self.component_infos:
# Ignore the $ROOT component.
if ci.parent is None:
continue
info_basedir[ci.subpath] = info_basedir.get(ci.subpath, []) + [ci]
# Compute the list of subdirectories to scan.
subpath_subdirs = {}
for ci in self.component_infos:
# Ignore root components.
if ci.subpath == '/':
continue
# Otherwise, append this subpath to the parent list.
parent_path = os.path.dirname(ci.subpath)
subpath_subdirs[parent_path] = parent_list = subpath_subdirs.get(
parent_path, set())
parent_list.add(os.path.basename(ci.subpath))
# Generate the build files.
for subpath, infos in info_basedir.items():
# Order the components by name to have a canonical ordering.
infos.sort(key = lambda ci: ci.name)
# Format the components into llvmbuild fragments.
fragments = []
# Add the common fragments.
subdirectories = subpath_subdirs.get(subpath)
if subdirectories:
fragment = """\
subdirectories = %s
""" % (" ".join(sorted(subdirectories)),)
fragments.append(("common", fragment))
# Add the component fragments.
num_common_fragments = len(fragments)
for ci in infos:
fragment = ci.get_llvmbuild_fragment()
if fragment is None:
continue
name = "component_%d" % (len(fragments) - num_common_fragments)
fragments.append((name, fragment))
if not fragments:
continue
assert subpath.startswith('/')
directory_path = os.path.join(output_path, subpath[1:])
# Create the directory if it does not already exist.
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# In an effort to preserve comments (which aren't parsed), read in
# the original file and extract the comments. We only know how to
# associate comments that prefix a section name.
f = open(infos[0]._source_path)
comments_map = {}
comment_block = ""
for ln in f:
if ln.startswith(';'):
comment_block += ln
elif ln.startswith('[') and ln.endswith(']\n'):
comments_map[ln[1:-2]] = comment_block
else:
comment_block = ""
f.close()
            # Create the LLVMBuild file.
file_path = os.path.join(directory_path, 'LLVMBuild.txt')
f = open(file_path, "w")
# Write the header.
header_fmt = ';===- %s %s-*- Conf -*--===;'
header_name = '.' + os.path.join(subpath, 'LLVMBuild.txt')
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
""" % header_string)
            # Write out each component fragment.
for name,fragment in fragments:
comment = comments_map.get(name)
if comment is not None:
f.write(comment)
f.write("[%s]\n" % name)
f.write(fragment)
if fragment is not fragments[-1][1]:
f.write('\n')
f.close()
def write_library_table(self, output_path, enabled_optional_components):
# Write out the mapping from component names to required libraries.
#
# We do this in topological order so that we know we can append the
# dependencies for added library groups.
entries = {}
for c in self.ordered_component_infos:
# Skip optional components which are not enabled.
if c.type_name == 'OptionalLibrary' \
and c.name not in enabled_optional_components:
continue
# Skip target groups which are not enabled.
tg = c.get_parent_target_group()
if tg and not tg.enabled:
continue
# Only certain components are in the table.
if c.type_name not in ('Library', 'OptionalLibrary', \
'LibraryGroup', 'TargetGroup'):
continue
# Compute the llvm-config "component name". For historical reasons,
# this is lowercased based on the library name.
llvmconfig_component_name = c.get_llvmconfig_component_name()
# Get the library name, or None for LibraryGroups.
if c.type_name == 'Library' or c.type_name == 'OptionalLibrary':
library_name = c.get_prefixed_library_name()
is_installed = c.installed
else:
library_name = None
is_installed = True
# Get the component names of all the required libraries.
required_llvmconfig_component_names = [
self.component_info_map[dep].get_llvmconfig_component_name()
for dep in c.required_libraries]
# Insert the entries for library groups we should add to.
for dep in c.add_to_library_groups:
entries[dep][2].append(llvmconfig_component_name)
# Add the entry.
entries[c.name] = (llvmconfig_component_name, library_name,
required_llvmconfig_component_names,
is_installed)
# Convert to a list of entries and sort by name.
entries = list(entries.values())
# Create an 'all' pseudo component. We keep the dependency list small by
# only listing entries that have no other dependents.
root_entries = set(e[0] for e in entries)
for _,_,deps,_ in entries:
root_entries -= set(deps)
entries.append(('all', None, root_entries, True))
entries.sort()
# Compute the maximum number of required libraries, plus one so there is
# always a sentinel.
max_required_libraries = max(len(deps)
for _,_,deps,_ in entries) + 1
# Write out the library table.
make_install_dir(os.path.dirname(output_path))
f = open(output_path+'.new', 'w')
f.write("""\
//===- llvm-build generated file --------------------------------*- C++ -*-===//
//
// Component Library Dependency Table
//
// Automatically generated file, do not edit!
//
//===----------------------------------------------------------------------===//
""")
f.write('struct AvailableComponent {\n')
f.write(' /// The name of the component.\n')
f.write(' const char *Name;\n')
f.write('\n')
f.write(' /// The name of the library for this component (or NULL).\n')
f.write(' const char *Library;\n')
f.write('\n')
f.write(' /// Whether the component is installed.\n')
f.write(' bool IsInstalled;\n')
f.write('\n')
f.write('\
/// The list of libraries required when linking this component.\n')
f.write(' const char *RequiredLibraries[%d];\n' % (
max_required_libraries))
f.write('} AvailableComponents[%d] = {\n' % len(entries))
for name,library_name,required_names,is_installed in entries:
if library_name is None:
library_name_as_cstr = 'nullptr'
else:
library_name_as_cstr = '"%s"' % library_name
if is_installed:
is_installed_as_cstr = 'true'
else:
is_installed_as_cstr = 'false'
f.write(' { "%s", %s, %s, { %s } },\n' % (
name, library_name_as_cstr, is_installed_as_cstr,
', '.join('"%s"' % dep
for dep in required_names)))
f.write('};\n')
f.close()
if not os.path.isfile(output_path):
os.rename(output_path+'.new', output_path)
elif filecmp.cmp(output_path, output_path+'.new'):
os.remove(output_path+'.new')
else:
os.remove(output_path)
os.rename(output_path+'.new', output_path)
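    # Illustrative shape of the generated rows (added; the component names
    # below are made up, not taken from a real configuration):
    #   { "core", "LLVMCore", true, { "support", "binaryformat" } },
    # i.e. the llvm-config component name, the library name (or nullptr for
    # pure groups), the installed flag, and the required component names.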
def get_required_libraries_for_component(self, ci, traverse_groups = False):
"""
get_required_libraries_for_component(component_info) -> iter
Given a Library component info descriptor, return an iterator over all
of the directly required libraries for linking with this component. If
traverse_groups is True, then library and target groups will be
traversed to include their required libraries.
"""
assert ci.type_name in ('Library', 'OptionalLibrary', 'LibraryGroup', 'TargetGroup')
for name in ci.required_libraries:
# Get the dependency info.
dep = self.component_info_map[name]
# If it is a library, yield it.
if dep.type_name == 'Library' or dep.type_name == 'OptionalLibrary':
yield dep
continue
# Otherwise if it is a group, yield or traverse depending on what
# was requested.
if dep.type_name in ('LibraryGroup', 'TargetGroup'):
if not traverse_groups:
yield dep
continue
for res in self.get_required_libraries_for_component(dep, True):
yield res
def get_fragment_dependencies(self):
"""
get_fragment_dependencies() -> iter
Compute the list of files (as absolute paths) on which the output
fragments depend (i.e., files for which a modification should trigger a
rebuild of the fragment).
"""
# Construct a list of all the dependencies of the Makefile fragment
# itself. These include all the LLVMBuild files themselves, as well as
# all of our own sources.
#
# Many components may come from the same file, so we make sure to unique
# these.
build_paths = set()
for ci in self.component_infos:
p = os.path.join(self.source_root, ci.subpath[1:], 'LLVMBuild.txt')
if p not in build_paths:
yield p
build_paths.add(p)
# Gather the list of necessary sources by just finding all loaded
# modules that are inside the LLVM source tree.
for module in sys.modules.values():
# Find the module path.
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
# Strip off any compiled suffix.
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
# If the path exists and is in the source tree, consider it a
# dependency.
if (path.startswith(self.source_root) and os.path.exists(path)):
yield path
def foreach_cmake_library(self, f,
enabled_optional_components,
skip_disabled,
skip_not_installed):
for ci in self.ordered_component_infos:
# Skip optional components which are not enabled.
if ci.type_name == 'OptionalLibrary' \
and ci.name not in enabled_optional_components:
continue
# We only write the information for libraries currently.
if ci.type_name not in ('Library', 'OptionalLibrary'):
continue
# Skip disabled targets.
if skip_disabled:
tg = ci.get_parent_target_group()
if tg and not tg.enabled:
continue
# Skip targets that will not be installed
if skip_not_installed and not ci.installed:
continue
f(ci)
def write_cmake_fragment(self, output_path, enabled_optional_components):
"""
write_cmake_fragment(output_path) -> None
Generate a CMake fragment which includes all of the collated LLVMBuild
information in a format that is easily digestible by a CMake. The exact
contents of this are closely tied to how the CMake configuration
integrates LLVMBuild, see CMakeLists.txt in the top-level.
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the CMake fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
# Write the header.
header_fmt = '\
#===-- %s - LLVMBuild Configuration for LLVM %s-*- CMake -*--===#'
header_name = os.path.basename(output_path)
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# This file contains the LLVMBuild project information in a format easily
# consumed by the CMake based build system.
#
# This file is autogenerated by llvm-build, do not edit!
#
#===------------------------------------------------------------------------===#
""" % header_string)
# Write the dependency information in the best way we can.
f.write("""
# LLVMBuild CMake fragment dependencies.
#
# CMake has no builtin way to declare that the configuration depends on
# a particular file. However, a side effect of configure_file is to add
# said input file to CMake's internal dependency list. So, we use that
# and a dummy output file to communicate the dependency information to
# CMake.
#
# FIXME: File a CMake RFE to get a properly supported version of this
# feature.
""")
for dep in dependencies:
f.write("""\
configure_file(\"%s\"
${CMAKE_CURRENT_BINARY_DIR}/DummyConfigureOutput)\n""" % (
cmake_quote_path(dep),))
# Write the properties we use to encode the required library dependency
# information in a form CMake can easily use directly.
f.write("""
# Explicit library dependency information.
#
# The following property assignments effectively create a map from component
# names to required libraries, in a way that is easily accessed from CMake.
""")
self.foreach_cmake_library(
lambda ci:
f.write("""\
set_property(GLOBAL PROPERTY LLVMBUILD_LIB_DEPS_%s %s)\n""" % (
ci.get_prefixed_library_name(), " ".join(sorted(
dep.get_prefixed_library_name()
for dep in self.get_required_libraries_for_component(ci)))))
,
enabled_optional_components,
skip_disabled = False,
skip_not_installed = False # Dependency info must be emitted for internals libs too
)
f.close()
def write_cmake_exports_fragment(self, output_path, enabled_optional_components):
"""
write_cmake_exports_fragment(output_path) -> None
Generate a CMake fragment which includes LLVMBuild library
dependencies expressed similarly to how CMake would write
them via install(EXPORT).
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the CMake exports fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
f.write("""\
# Explicit library dependency information.
#
# The following property assignments tell CMake about link
# dependencies of libraries imported from LLVM.
""")
self.foreach_cmake_library(
lambda ci:
f.write("""\
set_property(TARGET %s PROPERTY IMPORTED_LINK_INTERFACE_LIBRARIES %s)\n""" % (
ci.get_prefixed_library_name(), " ".join(sorted(
dep.get_prefixed_library_name()
for dep in self.get_required_libraries_for_component(ci)))))
,
enabled_optional_components,
skip_disabled = True,
skip_not_installed = True # Do not export internal libraries like gtest
)
f.close()
def add_magic_target_components(parser, project, opts):
"""add_magic_target_components(project, opts) -> None
Add the "magic" target based components to the project, which can only be
determined based on the target configuration options.
This currently is responsible for populating the required_libraries list of
the "all-targets", "Native", "NativeCodeGen", and "Engine" components.
"""
# Determine the available targets.
available_targets = dict((ci.name,ci)
for ci in project.component_infos
if ci.type_name == 'TargetGroup')
# Find the configured native target.
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
native_target_name = { 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
if native_target_name is None:
native_target = None
else:
native_target = available_targets.get(native_target_name)
if native_target is None:
parser.error("invalid native target: %r (not in project)" % (
opts.native_target,))
if native_target.type_name != 'TargetGroup':
parser.error("invalid native target: %r (not a target)" % (
opts.native_target,))
# Find the list of targets to enable.
if opts.enable_targets is None:
enable_targets = available_targets.values()
else:
# We support both space separated and semi-colon separated lists.
if opts.enable_targets == '':
enable_target_names = []
elif ' ' in opts.enable_targets:
enable_target_names = opts.enable_targets.split()
else:
enable_target_names = opts.enable_targets.split(';')
enable_targets = []
for name in enable_target_names:
target = available_targets.get(name)
if target is None:
parser.error("invalid target to enable: %r (not in project)" % (
name,))
if target.type_name != 'TargetGroup':
parser.error("invalid target to enable: %r (not a target)" % (
name,))
enable_targets.append(target)
# Find the special library groups we are going to populate. We enforce that
# these appear in the project (instead of just adding them) so that they at
# least have an explicit representation in the project LLVMBuild files (and
# comments explaining how they are populated).
def find_special_group(name):
info = info_map.get(name)
if info is None:
fatal("expected project to contain special %r component" % (
name,))
if info.type_name != 'LibraryGroup':
fatal("special component %r should be a LibraryGroup" % (
name,))
if info.required_libraries:
fatal("special component %r must have empty %r list" % (
name, 'required_libraries'))
if info.add_to_library_groups:
fatal("special component %r must have empty %r list" % (
name, 'add_to_library_groups'))
info._is_special_group = True
return info
info_map = dict((ci.name, ci) for ci in project.component_infos)
all_targets = find_special_group('all-targets')
native_group = find_special_group('Native')
native_codegen_group = find_special_group('NativeCodeGen')
engine_group = find_special_group('Engine')
# Set the enabled bit in all the target groups, and append to the
# all-targets list.
for ci in enable_targets:
all_targets.required_libraries.append(ci.name)
ci.enabled = True
# If we have a native target, then that defines the native and
# native_codegen libraries.
if native_target and native_target.enabled:
native_group.required_libraries.append(native_target.name)
native_codegen_group.required_libraries.append(
'%sCodeGen' % native_target.name)
# If we have a native target with a JIT, use that for the engine. Otherwise,
# use the interpreter.
if native_target and native_target.enabled and native_target.has_jit:
engine_group.required_libraries.append('MCJIT')
engine_group.required_libraries.append(native_group.name)
else:
engine_group.required_libraries.append('Interpreter')
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options]")
group = OptionGroup(parser, "Input Options")
group.add_option("", "--source-root", dest="source_root", metavar="PATH",
help="Path to the LLVM source (inferred if not given)",
action="store", default=None)
group.add_option("", "--llvmbuild-source-root",
dest="llvmbuild_source_root",
help=(
"If given, an alternate path to search for LLVMBuild.txt files"),
action="store", default=None, metavar="PATH")
parser.add_option_group(group)
group = OptionGroup(parser, "Output Options")
group.add_option("", "--print-tree", dest="print_tree",
help="Print out the project component tree [%default]",
action="store_true", default=False)
group.add_option("", "--write-llvmbuild", dest="write_llvmbuild",
help="Write out the LLVMBuild.txt files to PATH",
action="store", default=None, metavar="PATH")
group.add_option("", "--write-library-table",
dest="write_library_table", metavar="PATH",
help="Write the C++ library dependency table to PATH",
action="store", default=None)
group.add_option("", "--write-cmake-fragment",
dest="write_cmake_fragment", metavar="PATH",
help="Write the CMake project information to PATH",
action="store", default=None)
group.add_option("", "--write-cmake-exports-fragment",
dest="write_cmake_exports_fragment", metavar="PATH",
help="Write the CMake exports information to PATH",
action="store", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Configuration Options")
group.add_option("", "--native-target",
dest="native_target", metavar="NAME",
help=("Treat the named target as the 'native' one, if "
"given [%default]"),
action="store", default=None)
group.add_option("", "--enable-targets",
dest="enable_targets", metavar="NAMES",
help=("Enable the given space or semi-colon separated "
"list of targets, or all targets if not present"),
action="store", default=None)
group.add_option("", "--enable-optional-components",
dest="optional_components", metavar="NAMES",
help=("Enable the given space or semi-colon separated "
"list of optional components"),
action="store", default="")
parser.add_option_group(group)
(opts, args) = parser.parse_args()
# Determine the LLVM source path, if not given.
source_root = opts.source_root
if source_root:
if not os.path.exists(os.path.join(source_root, 'lib', 'IR',
'Function.cpp')):
parser.error('invalid LLVM source root: %r' % source_root)
else:
llvmbuild_path = os.path.dirname(__file__)
llvm_build_path = os.path.dirname(llvmbuild_path)
utils_path = os.path.dirname(llvm_build_path)
source_root = os.path.dirname(utils_path)
if not os.path.exists(os.path.join(source_root, 'lib', 'IR',
'Function.cpp')):
parser.error('unable to infer LLVM source root, please specify')
# Construct the LLVM project information.
llvmbuild_source_root = opts.llvmbuild_source_root or source_root
project_info = LLVMProjectInfo.load_from_path(
source_root, llvmbuild_source_root)
# Add the magic target based components.
add_magic_target_components(parser, project_info, opts)
# Validate the project component info.
project_info.validate_components()
# Print the component tree, if requested.
if opts.print_tree:
project_info.print_tree()
# Write out the components, if requested. This is useful for auto-upgrading
# the schema.
if opts.write_llvmbuild:
project_info.write_components(opts.write_llvmbuild)
# Write out the required library table, if requested.
if opts.write_library_table:
project_info.write_library_table(opts.write_library_table,
opts.optional_components)
# Write out the cmake fragment, if requested.
if opts.write_cmake_fragment:
project_info.write_cmake_fragment(opts.write_cmake_fragment,
opts.optional_components)
if opts.write_cmake_exports_fragment:
project_info.write_cmake_exports_fragment(opts.write_cmake_exports_fragment,
opts.optional_components)
if __name__=='__main__':
main()
| endlessm/chromium-browser | third_party/swiftshader/third_party/llvm-7.0/llvm/utils/llvm-build/llvmbuild/main.py | Python | bsd-3-clause | 34,146 | 0.002577 |
import os
import logging
from superdesk import get_resource_service
from jinja2.loaders import FileSystemLoader, ModuleLoader, ChoiceLoader, DictLoader, PrefixLoader
from liveblog.mongo_util import decode as mongodecode
__all__ = ['ThemeTemplateLoader', 'CompiledThemeTemplateLoader']
logger = logging.getLogger('superdesk')
class ThemeTemplateLoader(FileSystemLoader):
"""
Theme template loader for jinja2 SEO themes.
"""
def __init__(self, theme, encoding='utf-8', followlinks=False):
theme_name = theme['name']
themes = get_resource_service('themes')
theme_dirname = themes.get_theme_path(theme_name)
self.searchpath = [os.path.join(theme_dirname, 'templates')]
parent_theme = theme.get('extends')
if parent_theme:
parent_dirname = themes.get_theme_path(parent_theme)
self.searchpath.append(os.path.join(parent_dirname, 'templates'))
self.encoding = encoding
self.followlinks = followlinks
class CompiledThemeTemplateLoader(ChoiceLoader):
def __init__(self, theme):
"""
        A mixed-logic template loader. It uses the compiled (or dictionary)
        templates of the current theme and adds a FileSystemLoader-based
        prefix loader so that inheritance from the parent theme still works.
"""
self.loaders = []
theme_name = theme['name']
themes = get_resource_service('themes')
parent_theme = theme.get('extends')
files = theme.get('files', {'templates': {}})
if files.get('templates'):
self.addDictonary(theme)
if parent_theme:
parent = themes.find_one(req=None, name=parent_theme)
self.addDictonary(parent)
else:
compiled = themes.get_theme_compiled_templates_path(theme_name)
self.loaders.append(ModuleLoader(compiled))
if parent_theme:
parent_compiled = themes.get_theme_compiled_templates_path(parent_theme)
self.loaders.append(ModuleLoader(parent_compiled))
# let's now add the parent theme prefix loader
if parent_theme:
prefix_loader = self._parent_prefix_loader(parent_theme)
self.loaders.append(prefix_loader)
def _parent_prefix_loader(self, name):
"""
Creates a PrefixLoader in order to be able to extends parent theme
templates using as prefix the parent theme name
Example:
{% extends 'parent_theme_name/template_name.html' %}
{% include 'parent_theme_name/template_name.html' %}
Args:
name (`str`): Parent theme name
Returns:
PrefixLoader instance with parent_name as prefix
"""
themes = get_resource_service('themes')
parent_dirname = themes.get_theme_path(name)
search_paths = [os.path.join(parent_dirname, 'templates')]
return PrefixLoader({name: FileSystemLoader(search_paths)})
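    # Hedged illustration (added, not from the original source): a jinja2
    # PrefixLoader routes template lookups by their first path segment, e.g.
    #
    #   loader = PrefixLoader({'parent': DictLoader({'base.html': '...'})})
    #   loader.get_source(env, 'parent/base.html')   # env: a jinja2 Environment
    #
    # which is what allows a child theme to extend or include templates as
    # 'parent_theme_name/template_name.html'.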
def addDictonary(self, theme):
"""
        Add the theme's template files to the loaders as a DictLoader.
"""
files = theme.get('files', {'templates': {}})
if files.get('templates'):
compiled = {}
for file, content in files.get('templates').items():
compiled[mongodecode(file)] = content
self.loaders.append(DictLoader(compiled))
| hlmnrmr/liveblog | server/liveblog/themes/template/loaders.py | Python | agpl-3.0 | 3,378 | 0.001184 |
# Copyright 2016 Nicolas Bessi, Camptocamp SA
# Copyright 2018 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
_inherit = "res.partner"
zip_id = fields.Many2one(
comodel_name="res.city.zip",
string="ZIP Location",
index=True,
compute="_compute_zip_id",
readonly=False,
store=True,
)
city_id = fields.Many2one(
index=True, # add index for performance
compute="_compute_city_id",
readonly=False,
store=True,
)
city = fields.Char(compute="_compute_city", readonly=False, store=True)
zip = fields.Char(compute="_compute_zip", readonly=False, store=True)
country_id = fields.Many2one(
compute="_compute_country_id", readonly=False, store=True
)
state_id = fields.Many2one(compute="_compute_state_id", readonly=False, store=True)
@api.depends("state_id", "country_id", "city_id", "zip")
def _compute_zip_id(self):
"""Empty the zip auto-completion field if data mismatch when on UI."""
for record in self.filtered("zip_id"):
fields_map = {
"zip": "name",
"city_id": "city_id",
"state_id": "state_id",
"country_id": "country_id",
}
for rec_field, zip_field in fields_map.items():
if (
record[rec_field]
and record[rec_field] != record._origin[rec_field]
and record[rec_field] != record.zip_id[zip_field]
):
record.zip_id = False
break
@api.depends("zip_id")
def _compute_city_id(self):
if hasattr(super(), "_compute_city_id"):
super()._compute_city_id() # pragma: no cover
for record in self:
if record.zip_id:
record.city_id = record.zip_id.city_id
elif not record.country_enforce_cities:
record.city_id = False
@api.depends("zip_id")
def _compute_city(self):
if hasattr(super(), "_compute_city"):
super()._compute_city() # pragma: no cover
for record in self:
if record.zip_id:
record.city = record.zip_id.city_id.name
@api.depends("zip_id")
def _compute_zip(self):
if hasattr(super(), "_compute_zip"):
super()._compute_zip() # pragma: no cover
for record in self:
if record.zip_id:
record.zip = record.zip_id.name
@api.depends("zip_id", "state_id")
def _compute_country_id(self):
if hasattr(super(), "_compute_country_id"):
super()._compute_country_id() # pragma: no cover
for record in self:
if record.zip_id.city_id.country_id:
record.country_id = record.zip_id.city_id.country_id
elif record.state_id:
record.country_id = record.state_id.country_id
@api.depends("zip_id")
def _compute_state_id(self):
if hasattr(super(), "_compute_state_id"):
super()._compute_state_id() # pragma: no cover
for record in self:
state = record.zip_id.city_id.state_id
if state and record.state_id != state:
record.state_id = record.zip_id.city_id.state_id
@api.constrains("zip_id", "country_id", "city_id", "state_id", "zip")
def _check_zip(self):
if self.env.context.get("skip_check_zip"):
return
for rec in self:
if not rec.zip_id:
continue
if rec.zip_id.city_id.country_id != rec.country_id:
raise ValidationError(
_("The country of the partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
if rec.zip_id.city_id.state_id != rec.state_id:
raise ValidationError(
_("The state of the partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
if rec.zip_id.city_id != rec.city_id:
raise ValidationError(
_("The city of partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
if rec.zip_id.name != rec.zip:
raise ValidationError(
_("The zip of the partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
def _zip_id_domain(self):
return """
[
("city_id", "=?", city_id),
("city_id.country_id", "=?", country_id),
("city_id.state_id", "=?", state_id),
]
"""
@api.model
def _fields_view_get_address(self, arch):
# We want to use a domain that requires city_id to be on the view
# but we can't add it directly there, otherwise _fields_view_get_address
# in base_address_city won't do its magic, as it immediately returns
# if city_id is already in there. On the other hand, if city_id is not in the
# views, odoo won't let us use it in zip_id's domain.
# For this reason we need to set the domain here.
arch = super()._fields_view_get_address(arch)
doc = etree.fromstring(arch)
for node in doc.xpath("//field[@name='zip_id']"):
node.attrib["domain"] = self._zip_id_domain()
return etree.tostring(doc, encoding="unicode")
@api.model
def _address_fields(self):
return super()._address_fields() + ["zip_id"]
| OCA/partner-contact | base_location/models/res_partner.py | Python | agpl-3.0 | 5,851 | 0.001196 |
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
from django.core.urlresolvers import reverse
from django.db import models
from github import UnknownObjectException
from social.apps.django_app.default.models import UserSocialAuth
from documents.tasks.wiki_processor import process_wiki
from interface.utils import get_github
from interface.path_processor import PathProcessor
class UserProxy(User):
class Meta:
proxy = True
def get_auth(self):
try:
data = UserSocialAuth.objects.filter(user=self).values_list('extra_data')[0][0]
except:
return None
username = data['login']
password = data['access_token']
return (username, password)
class Repo(models.Model):
user = models.ForeignKey(UserProxy, related_name='repos')
full_name = models.TextField(unique=True)
webhook_id = models.IntegerField(null=True, blank=True)
is_private = models.BooleanField(default=True)
wiki_branch = models.TextField(default='master')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['full_name']
def __str__(self):
return self.full_name
def get_absolute_url(self):
return reverse('repo_detail', kwargs={'full_name': self.full_name})
@property
def clone_url(self):
return 'https://github.com/{}.git'.format(self.full_name)
def delete(self, *args, **kwargs):
self.remove_webhook()
return super(Repo, self).delete(*args, **kwargs)
def remove_webhook(self):
if not settings.DEBUG:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
try:
hook = grepo.get_hook(self.webhook_id)
hook.delete()
except UnknownObjectException:
pass
self.webhook_id = None
self.save()
def user_is_collaborator(self, user):
if not user.is_authenticated():
return False
if self.user == user or user.is_staff:
return True
g = get_github(user)
grepo = g.get_repo(self.full_name)
guser = g.get_user(user.username)
return grepo.has_in_collaborators(guser)
def add_webhook(self, request):
if settings.DEBUG:
self.webhook_id = 123
else:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
hook = grepo.create_hook(
'web',
{
'content_type': 'json',
'url': request.build_absolute_uri(reverse('hooksgithub')),
'secret': settings.WEBHOOK_SECRET
},
events=['push'],
active=True
)
self.webhook_id = hook.id
self.save()
@property
def directory(self):
path_processor = PathProcessor(self.full_name, is_directory=True)
return path_processor.repo_disk_path
def enqueue(self, file_change=None):
file_change = file_change or {}
process_wiki.delay(self.id, file_change)
def get_folder_contents(self, path, documents):
folders = []
docs = []
for document in documents:
doc_path = document.path
if path != '/':
doc_path = doc_path.replace(path, '')
if not doc_path.startswith('/'):
doc_path = '/{}'.format(doc_path)
if doc_path == '/':
docs.append(document.filename)
else:
first_seg = doc_path.split('/', maxsplit=2)[1]
if first_seg:
folder_name = '{}/'.format(first_seg)
if folder_name not in folders:
folders.append(folder_name)
folders = sorted(folders)
docs = sorted(docs)
folders.extend(docs)
return folders
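    # Worked example (added for illustration, not part of the original code):
    # with path='/' and two documents whose (path, filename) are
    # ('/', 'Home.md') and ('/guides', 'intro.md'), 'Home.md' lands in docs
    # and '/guides' becomes the folder entry 'guides/', so the method returns
    # ['guides/', 'Home.md']: folders first, then documents, each group sorted.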
| ZeroCater/Eyrie | interface/models.py | Python | mit | 4,056 | 0.00074 |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The functions for computing gradient updates."""
from typing import Callable, NamedTuple, Sequence
import chex
import haiku as hk
import jax
import optax
from brave.datasets import datasets
from brave.models import embedding_model
class ModelUpdates(NamedTuple):
params: hk.Params
state: hk.State
opt_state: optax.OptState
scalars: embedding_model.Scalars
UpdateFn = Callable[
[chex.PRNGKey, datasets.MiniBatch, hk.Params, hk.State, optax.OptState],
ModelUpdates]
def build_update_fn(optimizer: optax.GradientTransformation,
loss_fn: embedding_model.LossFn) -> UpdateFn:
"""Returns a function for computing model updates.
Args:
optimizer: The optimizer to use e.g. the result of optax.sgd(...).
loss_fn: An instance of the loss function, pmapped across all devices.
Returns:
A callable function that takes one step in the optimization problem using
the gradients of the loss computed by the model loss function.
"""
def update_fn(rng: chex.PRNGKey, minibatch: datasets.MiniBatch,
params: hk.Params, state: hk.State,
opt_state: optax.OptState) -> ModelUpdates:
grad_fn = jax.grad(loss_fn, has_aux=True)
grad, (state, scalars) = grad_fn(params, state, rng, minibatch)
grad = jax.lax.pmean(grad, axis_name='i')
scalars = jax.lax.pmean(scalars, axis_name='i')
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
return ModelUpdates(params, state, opt_state, scalars)
return update_fn
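# Hedged usage sketch (added, not from the original source): update_fn calls
# jax.lax.pmean with axis_name='i', so it is expected to run under a parallel
# map that defines that collective axis, typically jax.pmap, for example:
#
#   update_fn = jax.pmap(build_update_fn(optimizer, loss_fn), axis_name='i')
#   updates = update_fn(rngs, minibatch, params, state, opt_state)
#
# where rngs, minibatch, params, state and opt_state all carry a leading
# device axis (e.g. replicated with jax.device_put_replicated); the variable
# names here are illustrative.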
def get_batch_dims(global_batch_size: int, device_count: int,
local_device_count: int) -> Sequence[int]:
"""Compute the batch dims for this host.
The global_batch_size is the number of data samples that are optimized over
in one step of the optimization. This value must be split up so that each
individual device gets some share of the batch.
When running with multiple devices, there may be multiple hosts, each
with multiple local devices. Each host has a local copy of the program, and
runs a local copy of the code. Each host must therefore use a batch size
so that when all of the hosts run together, the total number of batched
elements matches the global batch size. We do this by splitting up the global
batch size evenly amongst all devices, and setting the batch size per host
to the number of host devices times the device batch size.
Args:
global_batch_size: The target total batch size per optimization step.
device_count: The total number of devices sharing computation per step.
local_device_count: The number of devices available on the current host.
Returns:
The batch dimensions to use on the currently running host.
"""
per_device_batch_size, remainder = divmod(global_batch_size, device_count)
if remainder:
raise ValueError(
        f'Cannot split batch of {global_batch_size} evenly across {device_count} devices.'
)
host_batch_dims = (local_device_count, per_device_batch_size)
return host_batch_dims
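# Worked example (added for illustration, not in the original file): with a
# global batch size of 256 shared by 32 devices spread over 4 hosts of 8
# local devices each, every device gets 256 // 32 = 8 samples, so each host
# builds batches shaped (local_device_count, per_device_batch_size):
#
#   get_batch_dims(256, device_count=32, local_device_count=8)  # -> (8, 8)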
| deepmind/brave | brave/training/trainer.py | Python | apache-2.0 | 3,771 | 0.003447 |
__author__ = 'matjaz'
| anirudhvenkats/clowdflows | workflows/management/commands/__init__.py | Python | gpl-3.0 | 23 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 22:51
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('active_until', models.DateTimeField(null=True, verbose_name='active until')),
('active', models.BooleanField(default=True, verbose_name='active')),
('message', c3nav.mapdata.fields.I18nField(verbose_name='Message')),
],
options={
'verbose_name': 'Announcement',
'verbose_name_plural': 'Announcements',
'get_latest_by': 'created',
'default_related_name': 'announcements',
},
),
]
| c3nav/c3nav | src/c3nav/site/migrations/0001_announcement.py | Python | apache-2.0 | 1,128 | 0.004433 |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.cloud.proto.vision.v1 import geometry_pb2
from google.cloud.proto.vision.v1 import image_annotator_pb2
from google.cloud.proto.vision.v1 import text_annotation_pb2
from google.cloud.proto.vision.v1 import web_detection_pb2
from google.gax.utils.messages import get_messages
names = []
for module in (geometry_pb2, image_annotator_pb2,
text_annotation_pb2, web_detection_pb2):
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.vision_v1.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| calpeyser/google-cloud-python | vision/google/cloud/vision_v1/types.py | Python | apache-2.0 | 1,284 | 0 |
#!/usr/local/bin/python
# -*-coding:utf8-*-
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
import random
class RotateUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent=''):
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if ua:
request.headers.setdefault('User-Agent', ua)
# print '********user-agent:',ua
    # the default user_agent_list is composed of Chrome, IE, Firefox, Mozilla, Opera and Netscape strings
    # for more user agent strings, see http://www.useragentstring.com/pages/useragentstring.php
    user_agent_list = [ \
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1", \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
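# Hedged usage note (added for illustration): in a Scrapy project this
# middleware is typically enabled from settings.py, replacing the built-in
# UserAgentMiddleware; the dotted path to this module is hypothetical and
# depends on the project layout:
#
#   DOWNLOADER_MIDDLEWARES = {
#       'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
#       'dianping.spiders.rotateAgent.RotateUserAgentMiddleware': 400,
#   }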
| phodal-archive/scrapy-elasticsearch-demo | dianping/dianping/spiders/rotateAgent.py | Python | mit | 2,814 | 0.013859 |
from __future__ import absolute_import
from agms.configuration import Configuration
from agms.agms import Agms
from agms.transaction import Transaction
from agms.safe import SAFE
from agms.report import Report
from agms.recurring import Recurring
from agms.hpp import HPP
from agms.version import Version
| agmscode/agms_python | agms/__init__.py | Python | mit | 305 | 0 |
#!/usr/bin/env python
# coding=utf-8
"""303. Multiples with small digits
https://projecteuler.net/problem=303
For a positive integer n, define f(n) as the least positive multiple of n
that, written in base 10, uses only digits ≤ 2.
Thus f(2)=2, f(3)=12, f(7)=21, f(42)=210, f(89)=1121222.
Also, $\sum \limits_{n = 1}^{100} {\dfrac{f(n)}{n}} = 11363107$.
Find $\sum \limits_{n=1}^{10000} {\dfrac{f(n)}{n}}$.
"""
| openqt/algorithms | projecteuler/pe303-multiples-with-small-digits.py | Python | gpl-3.0 | 418 | 0.014423 |
import json
import os.path
from ems.app import Bootstrapper, absolute_path
from ems.inspection.util import classes
from ems.validation.abstract import Validator, MessageProvider
from ems.validation.registry import Registry
from ems.validation.rule_validator import RuleValidator, SimpleMessageProvider
from ems.validation.validators.base import *
from ems.validation.validators.filesystem import *
class AppPathNormalizer(PathNormalizer):
def normalize(self, path):
return absolute_path(path)
class ValidationBootstrapper(Bootstrapper):
validatorModules = set([
'ems.validation.validators.base',
'ems.validation.validators.filesystem',
])
messagesFile = os.path.join('resources','lang','de','validation.json')
def bootstrap(self, app):
self.app = app
app.share(Registry, self.createRegistry)
app.share(MessageProvider, self.createMessageProvider)
app.share(PathNormalizer, self.createPathNormalizer)
def createRegistry(self):
registry = Registry(self.app)
self.addValidatorClasses(registry)
return registry
def createPathNormalizer(self):
return AppPathNormalizer()
def addValidatorClasses(self, registry):
for module in self.validatorModules:
for cls in self.findModuleValidatorClasses(module):
registry += cls
def createMessageProvider(self):
with open(self.messagesFilePath()) as jsonFile:
messages = json.load(jsonFile)
return SimpleMessageProvider(messages)
def messagesFilePath(self):
return os.path.join(self.app.appPath, self.messagesFile)
@classmethod
def findModuleValidatorClasses(cls, moduleName):
validatorClasses = []
for clazz in classes(moduleName):
if issubclass(clazz, Validator):
validatorClasses.append(clazz)
return validatorClasses
| mtils/ems | ems/support/bootstrappers/validation.py | Python | mit | 1,933 | 0.002587 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA.
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
#
import json
import logging
from grimoirelab_toolkit.datetime import datetime_utcnow, str_to_datetime
from ...backend import (Backend,
BackendCommand,
BackendCommandArgumentParser)
from ...client import HttpClient, RateLimitHandler
from ...errors import BackendError
CATEGORY_TWEET = "tweet"
MAX_SEARCH_QUERY = 500
TWITTER_URL = 'https://twitter.com/'
TWITTER_API_URL = 'https://api.twitter.com/1.1/search/tweets.json'
MAX_ITEMS = 100
# Range before sleeping until rate limit reset
MIN_RATE_LIMIT = 1
# Time to avoid too many request exception
SLEEP_TIME = 30
TWEET_TYPE_MIXED = "mixed"
TWEET_TYPE_RECENT = "recent"
TWEET_TYPE_POPULAR = "popular"
RATE_LIMIT_HEADER = "x-rate-limit-remaining"
RATE_LIMIT_RESET_HEADER = "x-rate-limit-reset"
logger = logging.getLogger(__name__)
class Twitter(Backend):
"""Twitter backend.
This class allows to fetch samples of tweets containing specific
keywords. Initialize this class passing API key needed
for authentication with the parameter `api_key`.
:param query: query to fetch tweets
:param api_token: token or key needed to use the API
:param max_items: maximum number of issues requested on the same query
:param sleep_for_rate: sleep until rate limit is reset
:param min_rate_to_sleep: minimun rate needed to sleep until
it will be reset
:param sleep_time: minimun waiting time to avoid too many request
exception
:param tag: label used to mark the data
:param archive: archive to store/retrieve items
"""
version = '0.2.2'
CATEGORIES = [CATEGORY_TWEET]
def __init__(self, query, api_token, max_items=MAX_ITEMS,
sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT,
sleep_time=SLEEP_TIME,
tag=None, archive=None):
origin = TWITTER_URL
if len(query) >= MAX_SEARCH_QUERY:
msg = "Search query length exceeded %s, max is %s" % (len(query), MAX_SEARCH_QUERY)
raise BackendError(cause=msg)
super().__init__(origin, tag=tag, archive=archive)
self.query = query
self.api_token = api_token
self.max_items = max_items
self.sleep_for_rate = sleep_for_rate
self.min_rate_to_sleep = min_rate_to_sleep
self.sleep_time = sleep_time
self.client = None
def fetch(self, category=CATEGORY_TWEET, since_id=None, max_id=None,
geocode=None, lang=None,
include_entities=True, tweets_type=TWEET_TYPE_MIXED):
"""Fetch the tweets from the server.
        This method fetches, through the Twitter Search API, tweets published in the last seven days.
:param category: the category of items to fetch
:param since_id: if not null, it returns results with an ID greater than the specified ID
        :param max_id: if not None, it returns results with an ID less than the specified ID
:param geocode: if enabled, returns tweets by users located at latitude,longitude,"mi"|"km"
:param lang: if enabled, restricts tweets to the given language, given by an ISO 639-1 code
:param include_entities: if disabled, it excludes entities node
:param tweets_type: type of tweets returned. Default is “mixed”, others are "recent" and "popular"
:returns: a generator of tweets
"""
kwargs = {"since_id": since_id,
"max_id": max_id,
"geocode": geocode,
"lang": lang,
"include_entities": include_entities,
"result_type": tweets_type}
items = super().fetch(category, **kwargs)
return items
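    # Hedged usage sketch (added, not from the original source); the query and
    # token values are placeholders:
    #
    #   backend = Twitter('(grimoirelab OR perceval)', api_token='an-api-token')
    #   for tweet in backend.fetch(category=CATEGORY_TWEET, lang='en'):
    #       ...
    #
    # Items are yielded wrapped with Perceval metadata, with the raw tweet
    # expected under the 'data' key.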
def fetch_items(self, category, **kwargs):
"""Fetch the tweets
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
since_id = kwargs['since_id']
max_id = kwargs['max_id']
geocode = kwargs['geocode']
lang = kwargs['lang']
entities = kwargs['include_entities']
tweets_type = kwargs['result_type']
logger.info("Fetching tweets %s from %s to %s",
self.query, str(since_id),
str(max_id) if max_id else '--')
tweets_ids = []
min_date = None
max_date = None
group_tweets = self.client.tweets(self.query, since_id=since_id, max_id=max_id, geocode=geocode,
lang=lang, include_entities=entities, result_type=tweets_type)
for tweets in group_tweets:
for i in range(len(tweets)):
tweet = tweets[i]
tweets_ids.append(tweet['id'])
if tweets[-1] == tweet:
min_date = str_to_datetime(tweets[-1]['created_at'])
if tweets[0] == tweet and not max_date:
max_date = str_to_datetime(tweets[0]['created_at'])
yield tweet
logger.info("Fetch process completed: %s (unique %s) tweets fetched, from %s to %s",
len(tweets_ids), len(list(set(tweets_ids))), min_date, max_date)
@classmethod
def has_archiving(cls):
"""Returns whether it supports archiving items on the fetch process.
:returns: this backend supports items archive
"""
return True
@classmethod
def has_resuming(cls):
"""Returns whether it supports to resume the fetch process.
:returns: this backend supports items resuming
"""
return False
@staticmethod
def metadata_id(item):
"""Extracts the identifier from a Twitter item."""
return str(item['id_str'])
@staticmethod
def metadata_updated_on(item):
"""Extracts and coverts the update time from a Twitter item.
The timestamp is extracted from 'created_at' field and converted
to a UNIX timestamp.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
ts = item['created_at']
ts = str_to_datetime(ts)
return ts.timestamp()
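    # For illustration (hypothetical value): a 'created_at' string such as
    # "Tue Feb 20 12:00:00 +0000 2018" is parsed with str_to_datetime and
    # converted to a float UNIX timestamp, e.g. 1519128000.0.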
@staticmethod
def metadata_category(item):
"""Extracts the category from a Twitter item.
This backend only generates one type of item which is
'tweet'.
"""
return CATEGORY_TWEET
def _init_client(self, from_archive=False):
"""Init client"""
return TwitterClient(self.api_token, self.max_items,
self.sleep_for_rate, self.min_rate_to_sleep, self.sleep_time,
self.archive, from_archive)
class TwitterClient(HttpClient, RateLimitHandler):
"""Twitter API client.
Client for fetching information from the Twitter server
using its REST API v1.1.
:param api_key: key needed to use the API
:param max_items: maximum number of items per request
:param sleep_for_rate: sleep until rate limit is reset
    :param min_rate_to_sleep: minimum rate needed to sleep until
         it is reset
:param sleep_time: time to sleep in case
of connection problems
:param archive: an archive to store/read fetched data
:param from_archive: it tells whether to write/read the archive
"""
def __init__(self, api_key, max_items=MAX_ITEMS,
sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT, sleep_time=SLEEP_TIME,
archive=None, from_archive=False):
self.api_key = api_key
self.max_items = max_items
super().__init__(TWITTER_API_URL, sleep_time=sleep_time, extra_status_forcelist=[429],
archive=archive, from_archive=from_archive)
super().setup_rate_limit_handler(sleep_for_rate=sleep_for_rate, min_rate_to_sleep=min_rate_to_sleep,
rate_limit_header=RATE_LIMIT_HEADER,
rate_limit_reset_header=RATE_LIMIT_RESET_HEADER)
def calculate_time_to_reset(self):
"""Number of seconds to wait. They are contained in the rate limit reset header"""
time_to_reset = self.rate_limit_reset_ts - (datetime_utcnow().replace(microsecond=0).timestamp() + 1)
time_to_reset = 0 if time_to_reset < 0 else time_to_reset
return time_to_reset
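    # Worked example (illustrative numbers only): if the reset header says the
    # window resets at t=1000 and the current UTC time is 940, the client waits
    # 1000 - (940 + 1) = 59 seconds; negative results are clamped to 0.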
@staticmethod
def sanitize_for_archive(url, headers, payload):
"""Sanitize payload of a HTTP request by removing the token information
before storing/retrieving archived items
:param: url: HTTP url request
:param: headers: HTTP headers request
:param: payload: HTTP payload request
:returns url, headers and the sanitized payload
"""
if 'Authorization' in headers:
headers.pop('Authorization')
return url, headers, payload
def tweets(self, query, since_id=None, max_id=None, geocode=None, lang=None,
include_entities=True, result_type=TWEET_TYPE_MIXED):
"""Fetch tweets for a given query between since_id and max_id.
:param query: query to fetch tweets
:param since_id: if not null, it returns results with an ID greater than the specified ID
:param max_id: if not null, it returns results with an ID less than the specified ID
:param geocode: if enabled, returns tweets by users located at latitude,longitude,"mi"|"km"
:param lang: if enabled, restricts tweets to the given language, given by an ISO 639-1 code
:param include_entities: if disabled, it excludes entities node
        :param result_type: type of tweets returned. Default is "mixed", others are "recent" and "popular"
:returns: a generator of tweets
"""
resource = self.base_url
params = {'q': query,
'count': self.max_items}
if since_id:
params['since_id'] = since_id
if max_id:
params['max_id'] = max_id
if geocode:
params['geocode'] = geocode
if lang:
params['lang'] = lang
params['include_entities'] = include_entities
params['result_type'] = result_type
while True:
raw_tweets = self._fetch(resource, params=params)
tweets = json.loads(raw_tweets)
if not tweets['statuses']:
break
params['max_id'] = tweets['statuses'][-1]['id'] - 1
yield tweets['statuses']
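        # Pagination note: the Search API is walked backwards by setting the
        # next request's max_id to (lowest id in the page - 1), so each page
        # contains strictly older tweets until an empty 'statuses' list is
        # returned and the loop above stops.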
def _fetch(self, url, params):
"""Fetch a resource.
        Method to fetch the contents of a resource from the API,
        updating the rate limit information when needed. It returns
        the text of the HTTP response for the given URL and parameters.
:param url: the endpoint of the API
:param params: parameters to filter
:returns: the text of the response
"""
if not self.from_archive:
self.sleep_for_rate_limit()
headers = {'Authorization': 'Bearer ' + self.api_key}
r = self.fetch(url, payload=params, headers=headers)
if not self.from_archive:
self.update_rate_limit(r)
return r.text
class TwitterCommand(BackendCommand):
"""Class to run Twitter backend from the command line."""
BACKEND = Twitter
@staticmethod
def setup_cmd_parser():
"""Returns the Twitter argument parser."""
parser = BackendCommandArgumentParser(token_auth=True,
archive=True)
# Backend token is required
action = parser.parser._option_string_actions['--api-token']
action.required = True
        # Twitter options
group = parser.parser.add_argument_group('Twitter arguments')
group.add_argument('--max-items', dest='max_items',
type=int, default=MAX_ITEMS,
help="Maximum number of items requested on the same query")
group.add_argument('--no-entities', dest='include_entities',
action='store_false',
help=" Exclude entities node")
group.add_argument('--geo-code', dest='geocode',
help="Select tweets by users located at latitude,longitude,radius")
group.add_argument('--lang', dest='lang',
help="Select tweets to the given language in ISO 639-1 code")
group.add_argument('--tweets-type', dest='tweets_type', default=TWEET_TYPE_MIXED,
help="Type of tweets returned. Default is 'mixed', others are 'recent' and 'popular'")
group.add_argument('--sleep-for-rate', dest='sleep_for_rate',
action='store_true',
help="sleep for getting more rate")
group.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep',
default=MIN_RATE_LIMIT, type=int,
help="sleep until reset when the rate limit reaches this value")
group.add_argument('--sleep-time', dest='sleep_time',
default=SLEEP_TIME, type=int,
help="minimun sleeping time to avoid too many request exception")
# Required arguments
parser.parser.add_argument('query',
help="Search query including operators, max 500 chars")
return parser
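    # Hypothetical command-line invocation (argument values are placeholders):
    #
    #   perceval twitter "grimoirelab" --api-token <bearer-token> \
    #       --max-items 100 --sleep-for-rate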
| valeriocos/perceval | perceval/backends/core/twitter.py | Python | gpl-3.0 | 14,309 | 0.002238 |
import os
import pydoc
import shutil
import sys
class DocTree:
def __init__(self, src, dest):
self.basepath = os.getcwd()
sys.path.append(os.path.join(self.basepath, src))
self.src = src
self.dest = dest
self._make_dest(dest)
self._make_docs(src)
self._move_docs(dest)
    def _make_dest(self, dest):
        path = os.path.join(self.basepath, dest)
        if os.path.isdir(path):
            # os.rmdir() fails on a non-empty directory; remove the whole
            # tree so regenerating the docs does not raise OSError.
            shutil.rmtree(path)
        os.makedirs(path)
def _make_docs(self, src):
print('making htmls for ' + src)
pydoc.writedocs(src)
print(os.listdir())
def _move_docs(self, dest):
for f in os.listdir():
if f.endswith('.html'):
_dest = os.path.join(dest, f)
os.rename(f, _dest)
def main():
dest = 'docs'
src = 'vcx/api'
src = os.path.join(os.getcwd(), src)
DocTree(src, dest)
if __name__ == '__main__':
main()
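# Usage sketch (assumes this script lives next to the 'vcx' package): running
#   python3 generate_docs.py
# from the wrapper root writes the pydoc HTML files into the 'docs' directory.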
| Artemkaaas/indy-sdk | vcx/wrappers/python3/generate_docs.py | Python | apache-2.0 | 952 | 0.003151 |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import six
import testtools
from neutron.agent.common import config as a_cfg
from neutron.agent import firewall
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.conf.agent import securitygroups_rpc as security_config
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
# TODO(mangelajo): replace all 'IPv4', 'IPv6' with constants
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
'IPv6': 'fe80::1'}
# TODO(mangelajo): replace all '*_sgid' strings with the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
super(BaseIptablesFirewallTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
security_config.register_securitygroups_opts()
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
iptables_cls = self.iptables_cls_p.start()
self.iptables_inst = mock.Mock()
self.v4filter_inst = mock.Mock()
self.v6filter_inst = mock.Mock()
self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
'raw': self.v4filter_inst
}
self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
'raw': self.v6filter_inst
}
iptables_cls.return_value = self.iptables_inst
self.iptables_inst.get_rules_for_table.return_value = (
RAW_TABLE_OUTPUT.splitlines())
self.firewall = iptables_firewall.IptablesFirewallDriver()
self.firewall.iptables = self.iptables_inst
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def _fake_port(self):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']]}
def test_prepare_port_filter_with_no_sg(self):
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
'-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_filter_ingress_tcp_min_port_0(self, ethertype):
rule = {'ethertype': ethertype,
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 0,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
self._test_filter_ingress_tcp_min_port_0('IPv4')
def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
self._test_filter_ingress_tcp_min_port_0('IPv6')
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule(
'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_prepare_port_filter(self,
rule,
ingress_expected_call=None,
egress_expected_call=None):
port = self._fake_port()
ethertype = rule['ethertype']
prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
filter_inst = self.v4filter_inst
dhcp_rule = [mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None)]
if ethertype == 'IPv6':
filter_inst = self.v6filter_inst
dhcp_rule = [mock.call.add_rule('ofake_dev',
'-s ::/128 -d ff02::/16 '
'-p ipv6-icmp -m icmp6 '
'--icmpv6-type %s -j RETURN' %
icmp6_type,
comment=None) for icmp6_type
in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES]
sg = [rule]
port['security_group_rules'] = sg
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback',
'-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG)
]
if ethertype == 'IPv6':
for icmp6_type in firewall.ICMPV6_ALLOWED_TYPES:
calls.append(
mock.call.add_rule('ifake_dev',
'-p ipv6-icmp -m icmp6 --icmpv6-type '
'%s -j RETURN' %
icmp6_type, comment=None))
calls += [
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None
)
]
if ingress_expected_call:
calls.append(ingress_expected_call)
calls += [mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
% prefix,
comment=ic.PAIR_ALLOW)]
if ethertype == 'IPv6':
calls.append(mock.call.add_rule('sfake_dev',
'-s fe80::fdff:ffff:feff:ffff/128 -m mac '
'--mac-source FF:FF:FF:FF:FF:FF -j RETURN',
comment=ic.PAIR_ALLOW))
calls.append(mock.call.add_rule('sfake_dev', '-j DROP',
comment=ic.PAIR_DROP))
calls += dhcp_rule
calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None))
if ethertype == 'IPv4':
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None))
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None))
if ethertype == 'IPv6':
calls.append(mock.call.add_rule('ofake_dev',
'-p ipv6-icmp -m icmp6 '
'--icmpv6-type %s -j DROP' %
constants.ICMPV6_TYPE_RA,
comment=None))
calls.append(mock.call.add_rule('ofake_dev',
'-p ipv6-icmp -j RETURN',
comment=None))
calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp '
'--sport 546 -m udp --dport 547 '
'-j RETURN', comment=None))
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 547 -m udp --dport 546 -j DROP',
comment=None))
calls += [
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
]
if egress_expected_call:
calls.append(egress_expected_call)
calls += [mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
comb = zip(calls, filter_inst.mock_calls)
for (l, r) in comb:
self.assertEqual(l, r)
filter_inst.assert_has_calls(calls)
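    # The helper above rebuilds the full expected per-port chain/rule sequence
    # and verifies it both pairwise (zip + assertEqual) and via
    # assert_has_calls, so a single misordered iptables call shows up as the
    # exact mismatching pair.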
def _test_remove_conntrack_entries(self, ethertype, protocol,
direction):
port = self._fake_port()
port['security_groups'] = 'fake_sg_id'
self.firewall.filtered_ports[port['device']] = port
self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
self.firewall.sg_rules['fake_sg_id'] = [
{'direction': direction, 'ethertype': ethertype,
'protocol': protocol}]
self.firewall.filter_defer_apply_on()
self.firewall.sg_rules['fake_sg_id'] = []
self.firewall.filter_defer_apply_off()
cmd = ['conntrack', '-D']
if protocol:
cmd.extend(['-p', protocol])
if ethertype == 'IPv4':
cmd.extend(['-f', 'ipv4'])
if direction == 'ingress':
cmd.extend(['-d', '10.0.0.1'])
else:
cmd.extend(['-s', '10.0.0.1'])
else:
cmd.extend(['-f', 'ipv6'])
if direction == 'ingress':
cmd.extend(['-d', 'fe80::1'])
else:
cmd.extend(['-s', 'fe80::1'])
# initial data has 1, 2, and 9 in use, CT zone will start at 10.
cmd.extend(['-w', 10])
calls = [
mock.call(cmd, run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
self.utils_exec.assert_has_calls(calls)
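    # For reference, the asserted command has the form
    #   conntrack -D [-p <proto>] -f <ipv4|ipv6> -d|-s <fixed ip> -w <zone>
    # i.e. delete the conntrack entries matching the port's address in the
    # port's CT zone, tolerating exit code 1 when nothing matched.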
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction)
def test_remove_conntrack_entries_for_port_sec_group_change(self):
port = self._fake_port()
port['security_groups'] = ['fake_sg_id']
self.firewall.filtered_ports[port['device']] = port
self.firewall.updated_sg_members = set(['tapfake_dev'])
self.firewall.filter_defer_apply_on()
new_port = copy.deepcopy(port)
new_port['security_groups'] = ['fake_sg_id2']
self.firewall.filtered_ports[port['device']] = new_port
self.firewall.filter_defer_apply_off()
calls = [
# initial data has 1, 2, and 9 in use, CT zone will start at 10.
mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
'-w', 10],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]),
mock.call(['conntrack', '-D', '-f', 'ipv4', '-s', '10.0.0.1',
'-w', 10],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]),
mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
'-w', 10],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]),
mock.call(['conntrack', '-D', '-f', 'ipv6', '-s', 'fe80::1',
'-w', 10],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self):
for direction in ['ingress', 'egress']:
for protocol in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', protocol, direction)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self):
for direction in ['ingress', 'egress']:
for protocol in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', protocol, direction)
def _test_remove_conntrack_entries_sg_member_changed(self, ethertype,
protocol, direction):
port = self._fake_port()
port['security_groups'] = ['fake_sg_id']
self.firewall.sg_rules.setdefault('fake_sg_id', [])
self.firewall.sg_rules['fake_sg_id'].append(
{'direction': direction, 'remote_group_id': 'fake_sg_id2',
'ethertype': ethertype})
self.firewall.filter_defer_apply_on()
self.firewall.devices_with_updated_sg_members['fake_sg_id2'] = [port]
if ethertype == "IPv4":
self.firewall.pre_sg_members = {'fake_sg_id2': {
'IPv4': ['10.0.0.2', '10.0.0.3']}}
self.firewall.sg_members = {'fake_sg_id2': {
'IPv4': ['10.0.0.3']}}
ethertype = "ipv4"
else:
self.firewall.pre_sg_members = {'fake_sg_id2': {
'IPv6': ['fe80::2', 'fe80::3']}}
self.firewall.sg_members = {'fake_sg_id2': {
'IPv6': ['fe80::3']}}
ethertype = "ipv6"
self.firewall.filter_defer_apply_off()
direction = '-d' if direction == 'ingress' else '-s'
remote_ip_direction = '-s' if direction == '-d' else '-d'
ips = {"ipv4": ['10.0.0.1', '10.0.0.2'],
"ipv6": ['fe80::1', 'fe80::2']}
calls = [
# initial data has 1, 2, and 9 in use, CT zone will start
# at 10.
mock.call(['conntrack', '-D', '-f', ethertype, direction,
ips[ethertype][0], '-w', 10,
remote_ip_direction, ips[ethertype][1]],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
self.utils_exec.assert_has_calls(calls)
def test_user_sg_rules_deduped_before_call_to_iptables_manager(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}] * 2
self.firewall.prepare_port_filter(port)
rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls]
self.assertEqual(len(set(rules)), len(rules))
def test_update_delete_port_filter(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}]
self.firewall.prepare_port_filter(port)
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'egress'}]
self.firewall.update_port_filter(port)
self.firewall.update_port_filter({'device': 'no-exist-device'})
self.firewall.remove_port_filter(port)
self.firewall.remove_port_filter({'device': 'no-exist-device'})
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback',
'-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev', '-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT'),
mock.call.remove_chain('ifake_dev'),
mock.call.remove_chain('ofake_dev'),
mock.call.remove_chain('sfake_dev'),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT'),
mock.call.remove_chain('ifake_dev'),
mock.call.remove_chain('ofake_dev'),
mock.call.remove_chain('sfake_dev'),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain')]
self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occurs
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def _mock_chain_applies(self):
class CopyingMock(mock.MagicMock):
"""Copies arguments so mutable arguments can be asserted on.
Copied verbatim from unittest.mock documentation.
"""
def __call__(self, *args, **kwargs):
args = copy.deepcopy(args)
kwargs = copy.deepcopy(kwargs)
return super(CopyingMock, self).__call__(*args, **kwargs)
        # Need to use CopyingMock because _{setup,remove}_chains_apply are
        # usually called with a dict that's modified between calls (i.e.,
        # self.firewall.filtered_ports).
chain_applies = CopyingMock()
self.firewall._setup_chains_apply = chain_applies.setup
self.firewall._remove_chains_apply = chain_applies.remove
return chain_applies
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
port_update = {'device': 'd1', 'mac_address': 'update'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({'d1': port_prepare}, {}),
mock.call.remove({'d1': port_prepare}, {}),
mock.call.setup({'d1': port_update}, {}),
mock.call.remove({'d1': port_update}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {}),
mock.call.remove(device2port, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
def test_ip_spoofing_filter_with_multiple_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_ip_spoofing_no_fixed_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': []}
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ifake_dev', '-j $sg-fallback',
comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
def setUp(self):
super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
self.firewall.ipset = mock.Mock()
self.firewall.ipset.get_name.side_effect = (
ipset_manager.IpsetManager.get_name)
self.firewall.ipset.set_name_exists.return_value = True
def _fake_port(self, sg_id=FAKE_SGID):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']],
'security_groups': [sg_id],
'security_group_source_groups': [sg_id]}
def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
return {'direction': 'ingress', 'remote_group_id': remote_group,
'ethertype': ethertype}
def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
_IPv6: [FAKE_SGID]}
rules = []
for ip_version, remote_group_list in six.iteritems(remote_groups):
for remote_group in remote_group_list:
rules.append(self._fake_sg_rule_for_ethertype(ip_version,
remote_group))
return {sg_id: rules}
def _fake_sg_members(self, sg_ids=None):
return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}
def test_update_security_group_members(self):
sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
self.firewall.update_security_group_members('fake_sgid', sg_members)
calls = [
mock.call.set_members('fake_sgid', 'IPv4',
['10.0.0.1', '10.0.0.2']),
mock.call.set_members('fake_sgid', 'IPv6',
['fe80::1'])
]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
def _setup_fake_firewall_members_and_rules(self, firewall):
firewall.sg_rules = self._fake_sg_rules()
firewall.pre_sg_rules = self._fake_sg_rules()
firewall.sg_members = self._fake_sg_members()
firewall.pre_sg_members = firewall.sg_members
def _prepare_rules_and_members_for_removal(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.pre_sg_members[OTHER_SGID] = (
self.firewall.pre_sg_members[FAKE_SGID])
def test_determine_remote_sgs_to_remove(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
_IPv6: [FAKE_SGID]})
self.assertEqual(
{_IPv4: set(), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_get_remote_sg_ids_by_ipversion(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))
def test_get_remote_sg_ids(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
_IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
port = self._fake_port()
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids(port))
def test_determine_sg_rules_to_remove(self):
self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
ports = [self._fake_port()]
self.assertEqual(set([OTHER_SGID]),
self.firewall._determine_sg_rules_to_remove(ports))
def test_get_sg_ids_set_for_ports(self):
sg_ids = set([FAKE_SGID, OTHER_SGID])
ports = [self._fake_port(sg_id) for sg_id in sg_ids]
self.assertEqual(sg_ids,
self.firewall._get_sg_ids_set_for_ports(ports))
def test_remove_sg_members(self):
self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
OTHER_SGID])
remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
_IPv6: set([FAKE_SGID, OTHER_SGID])}
self.firewall._remove_sg_members(remote_sgs_to_remove)
self.assertIn(OTHER_SGID, self.firewall.sg_members)
self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
def test_remove_unused_security_group_info_clears_unused_rules(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.prepare_port_filter(self._fake_port())
# create another SG which won't be referenced by any filtered port
fake_sg_rules = self.firewall.sg_rules['fake_sgid']
self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
# call the cleanup function, and check the unused sg_rules are out
self.firewall._remove_unused_security_group_info()
self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)
def test_remove_unused_security_group_info(self):
self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
self.firewall.pre_sg_members = self.firewall.sg_members
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
self.firewall.pre_sg_rules = self.firewall.sg_rules
port = self._fake_port()
self.firewall.filtered_ports['tapfake_dev'] = port
self.firewall._remove_unused_security_group_info()
self.assertNotIn(OTHER_SGID, self.firewall.sg_members)
def test_not_remove_used_security_group_info(self):
self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
self.firewall.pre_sg_members = self.firewall.sg_members
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
self.firewall.pre_sg_rules = self.firewall.sg_rules
port = self._fake_port()
self.firewall.filtered_ports['tapfake_dev'] = port
self.firewall._remove_unused_security_group_info()
self.assertIn(OTHER_SGID, self.firewall.sg_members)
def test_remove_all_unused_info(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.filtered_ports = {}
self.firewall._remove_unused_security_group_info()
self.assertFalse(self.firewall.sg_members)
self.assertFalse(self.firewall.sg_rules)
def test_single_fallback_accept_rule(self):
p1, p2 = self._fake_port(), self._fake_port()
self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {})
v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls
v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls
sg_chain_v4_accept = [call for call in v4_adds
if call == mock.call('sg-chain', '-j ACCEPT')]
sg_chain_v6_accept = [call for call in v6_adds
if call == mock.call('sg-chain', '-j ACCEPT')]
self.assertEqual(1, len(sg_chain_v4_accept))
self.assertEqual(1, len(sg_chain_v6_accept))
def test_remove_port_filter_with_destroy_ipset_chain(self):
self.firewall.sg_rules = self._fake_sg_rules()
port = self._fake_port()
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': [],
'IPv6': []}}
sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}
self.firewall.update_security_group_members('fake_sgid', sg_members)
self.firewall.prepare_port_filter(port)
self.firewall.filter_defer_apply_on()
self.firewall.sg_members = {'fake_sgid': {
'IPv4': [],
'IPv6': []}}
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.1'],
'IPv6': ['fe80::1']}}
self.firewall.remove_port_filter(port)
self.firewall.filter_defer_apply_off()
calls = [
mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
mock.call.get_name('fake_sgid', 'IPv4'),
mock.call.set_name_exists('NIPv4fake_sgid'),
mock.call.get_name('fake_sgid', 'IPv6'),
mock.call.set_name_exists('NIPv6fake_sgid'),
mock.call.destroy('fake_sgid', 'IPv4'),
mock.call.destroy('fake_sgid', 'IPv6')]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
self.firewall.sg_rules = self._fake_sg_rules()
self.firewall.pre_sg_rules = self._fake_sg_rules()
self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
'IPv6fake_sgid': ['fe80::1']}
self.firewall.sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.2'],
'IPv6': ['fe80::1']}}
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.2'],
'IPv6': ['fe80::1']}}
self.firewall.sg_rules['fake_sgid'].remove(
{'direction': 'ingress', 'remote_group_id': 'fake_sgid',
'ethertype': 'IPv4'})
self.firewall.sg_rules.update()
self.firewall._defer_apply = True
port = self._fake_port()
self.firewall.filtered_ports['tapfake_dev'] = port
self.firewall._pre_defer_filtered_ports = {}
self.firewall._pre_defer_unfiltered_ports = {}
self.firewall.filter_defer_apply_off()
calls = [mock.call.destroy('fake_sgid', 'IPv4')]
self.firewall.ipset.assert_has_calls(calls, True)
def test_sg_rule_expansion_with_remote_ips(self):
other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
self.firewall.sg_members = {'fake_sgid': {
'IPv4': [FAKE_IP['IPv4']] + other_ips,
'IPv6': [FAKE_IP['IPv6']]}}
port = self._fake_port()
rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
rules = self.firewall._expand_sg_rule_with_remote_ips(
rule, port, 'ingress')
self.assertEqual(list(rules),
[dict(list(rule.items()) +
[('source_ip_prefix', '%s/32' % ip)])
for ip in other_ips])
def test_build_ipv4v6_mac_ip_list(self):
mac_oth = 'ffff-ff0f-ffff'
mac_unix = 'FF:FF:FF:0F:FF:FF'
ipv4 = FAKE_IP['IPv4']
ipv6 = FAKE_IP['IPv6']
fake_ipv4_pair = []
fake_ipv4_pair.append((mac_unix, ipv4))
fake_ipv6_pair = []
fake_ipv6_pair.append((mac_unix, ipv6))
fake_ipv6_pair.append((mac_unix, 'fe80::fdff:ffff:fe0f:ffff'))
mac_ipv4_pairs = []
mac_ipv6_pairs = []
self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
mac_ipv4_pairs, mac_ipv6_pairs)
self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
mac_ipv4_pairs, mac_ipv6_pairs)
self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
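    # Illustrative note (comment only, not part of the original test module):
    # the extra IPv6 pair asserted above, (mac_unix, 'fe80::fdff:ffff:fe0f:ffff'),
    # is the EUI-64 link-local address derived from MAC FF:FF:FF:0F:FF:FF:
    # split the MAC in half, insert ff:fe in the middle, and flip the
    # universal/local bit of the first octet (0xFF -> 0xFD). A minimal
    # stdlib-only sketch of that derivation (for illustration only; the driver
    # under test has its own helpers for this):
    #
    #     def _eui64_link_local(mac):
    #         octets = [int(part, 16) for part in mac.split(':')]
    #         octets[0] ^= 0x02                # flip the universal/local bit
    #         eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
    #         groups = ['%02x%02x' % (eui64[i], eui64[i + 1])
    #                   for i in range(0, 8, 2)]
    #         return 'fe80::' + ':'.join(groups)
    #
    #     _eui64_link_local('FF:FF:FF:0F:FF:FF')  # -> 'fe80::fdff:ffff:fe0f:ffff'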
class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def setUp(self):
super(OVSHybridIptablesFirewallTestCase, self).setUp()
self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
# initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
'95c24827-02': 2, 'e804433b-61': 1}
def test__populate_initial_zone_map(self):
self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
def test__generate_device_zone(self):
# initial data has 1, 2, and 9 in use.
# we fill from top up first.
self.assertEqual(10, self.firewall._generate_device_zone('test'))
# once it's maxed out, it scans for gaps
self.firewall._device_zone_map['someport'] = (
iptables_firewall.MAX_CONNTRACK_ZONES)
for i in range(3, 9):
self.assertEqual(i, self.firewall._generate_device_zone(i))
# 9 and 10 are taken so next should be 11
self.assertEqual(11, self.firewall._generate_device_zone('p11'))
# take out zone 1 and make sure it's selected
self.firewall._device_zone_map.pop('e804433b-61')
self.assertEqual(1, self.firewall._generate_device_zone('p1'))
# fill it up and then make sure an extra throws an error
for i in range(1, 65536):
self.firewall._device_zone_map['dev-%s' % i] = i
with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
self.firewall._find_open_zone()
# with it full, try again, this should trigger a cleanup and return 1
self.assertEqual(1, self.firewall._generate_device_zone('p12'))
self.assertEqual({'p12': 1}, self.firewall._device_zone_map)
def test_get_device_zone(self):
# initial data has 1, 2, and 9 in use.
self.assertEqual(10,
self.firewall.get_device_zone('12345678901234567'))
# should have been truncated to 11 chars
self._dev_zone_map.update({'12345678901': 10})
self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
| igor-toga/local-snat | neutron/tests/unit/agent/linux/test_iptables_firewall.py | Python | apache-2.0 | 86,232 | 0.000116 |
def add_without_op(x, y):
    while y != 0:
carry = x & y
x = x ^ y
y = carry << 1
print(x)
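# Worked example (illustrative comment, not part of the original script):
# add_without_op(5, 3) iterates as
#   x=0b101, y=0b011 -> carry=0b001, x=x^y=0b110, y=carry<<1=0b010
#   x=0b110, y=0b010 -> carry=0b010, x=0b100, y=0b100
#   x=0b100, y=0b100 -> carry=0b100, x=0b000, y=0b1000
#   x=0b000, y=0b1000 -> carry=0b000, x=0b1000, y=0b000
# y is now 0, so the loop ends with x == 0b1000 == 8, the sum. Note that with
# Python's arbitrary-precision integers some negative operands (e.g.
# add_without_op(5, -3)) never terminate; a fixed-width mask such as
# x & 0xFFFFFFFF would be needed to emulate hardware wrap-around.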
def main():
x, y = map(int, input().split())
add_without_op(x, y)
if __name__ == "__main__":
main() | libchaos/algorithm-python | bit/add_with_op.py | Python | mit | 233 | 0.017167 |
#!/usr/bin/env python
from __future__ import print_function
import os, platform
from argparse import ArgumentParser
import numpy as np
import time
import resource
from mayavi import mlab
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
from numba import jit
@jit
def add_triangles_from_square(x, x1, x2, x3, x4, k):
'''
inserts values of kth and k+1th triangles into array x in place
from face values x1, x2, x3, x4
'''
k1 = k + 1
x[k,0], x[k,1], x[k,2] = x1, x2, x3
x[k1,0], x[k1,1], x[k1,2] = x2, x3, x4
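# Illustrative sketch (comment only, not part of the original script): a
# quadrilateral face with corner values x1..x4 is split along one diagonal
# into triangles k and k+1, which share the corners x2 and x3:
#
#     x = np.zeros((2, 3))
#     add_triangles_from_square(x, 1., 2., 3., 4., 0)
#     # x -> [[1., 2., 3.],
#     #       [2., 3., 4.]]
#
# The same call is repeated for y, z and t so the four arrays stay aligned
# triangle-by-triangle.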
@jit
def get_triangles_j(j, nx, k, t_land, ds_max, lambda_f, phi_f, dep,
x, y, z, t):
'''
    inserts coordinates & colour shading for the triangles around the ij points on a j-line:
    -- horizontal face centred on the T point at dep[j,i]
    + surrounding vertical faces (only counted where the neighbouring point is
    shallower, to avoid double counting)
    into max_no_triangles x 3 arrays x, y, z, t,
    starting with the horizontal face at
    x[k,1..3], y[k,1..3], z[k,1..3] and t[k,1..3]
    On land points, where dep[j,i]==0, t[k,1..3] is set to t_land
'''
jm1, jp1 = j-1, j+1
xx01, xx11, yy01, yy11 = lambda_f[jm1,0], lambda_f[j,0], phi_f[jm1,0],phi_f[j,0]
for i in range(1, nx-1):
im1, ip1 = i-1, i+1
xx00, xx10, yy00, yy10 = xx01, xx11, yy01, yy11
xx01, xx11, yy01, yy11 = lambda_f[jm1,i], lambda_f[j,i], phi_f[jm1,i],phi_f[j,i]
if abs(xx01 - xx00) + abs(yy01 - yy00) + abs(xx11 - xx10) + abs(yy11 - yy10) > ds_max:
continue
# x & y coordinates of f-points surrounding T-point i,j
# 00 = SW, 10 = NW, 01 = SE, 11 = NE
# do horizontal faces of T-box, zig-zag points SW, NW, SE, NE
# insert x & y for 2-triangles (kth & k+1th)
add_triangles_from_square(x, xx00, xx10, xx01, xx11, k)
add_triangles_from_square(y, yy00, yy10, yy01, yy11, k)
# .. constant z
dep00 = dep[j,i]
add_triangles_from_square(z, dep00, dep00, dep00, dep00, k)
# color depends on z
if dep00 == 0.:
add_triangles_from_square(t, t_land, t_land, t_land, t_land, k)
else:
add_triangles_from_square(t, dep00, dep00, dep00, dep00, k)
# & increment k by 2
k += 2
# do vertical faces surrounding T-box
for di, dj in ((1,0),(-1,0),(0,1),(0,-1)):
dep01 = dep[j+dj, i+di]
if dep01 > dep00:
# vertical face zig-zag z points:
add_triangles_from_square(z, dep00, dep01, dep00, dep01, k)
# color is shaded
add_triangles_from_square(t, dep00, dep01, dep00, dep01, k)
if di==-1:
# face at u-points, constant i
add_triangles_from_square(x, xx00, xx00, xx10, xx10, k)
add_triangles_from_square(y, yy00, yy00, yy10, yy10, k)
elif di==1:
add_triangles_from_square(x, xx01, xx01, xx11, xx11, k)
add_triangles_from_square(y, yy01, yy01, yy11, yy11, k)
elif dj ==-1:
# face at v-points, constant j
add_triangles_from_square(y, yy00, yy00, yy01, yy01, k)
add_triangles_from_square(x, xx00, xx00, xx01, xx01, k)
elif dj ==1:
add_triangles_from_square(y, yy10, yy10, yy11, yy11, k)
add_triangles_from_square(x, xx10, xx10, xx11, xx11, k)
k += 2
return k
def get_triangles(dep, lambda_f, phi_f, t_land, ds_max):
'''
takes 2-D array of depths dep, assumed to be positioned at j & i values
Creates mesh of triangles covering lego-block topography consistent with dep
Outputs four ntriangles x 3 arrays x, y, z, t where
x(k,1..3), y(k,1..3), z(k,1..3) and t(k,1..3) are the x, y, z and color values for the kth triangle
'''
# arrays in C-order so last index is x
ny,nx = dep.shape
# max_no_triangles is maximum number of triangles ....
# (ny-2)*(nx-2) is npts with all 4 sides available
# factor of 3 for top and 2 sides; factor of 2 since 2 triangles in each face
# add 2*(ny-2+nx-2) since edge interfaces not accounted for
max_no_triangles = (ny-2)*(nx-2)*3*2 + 2*(ny-2+nx-2)
    # unpacking along the leading axis of a (4, max_no_triangles, 3) array gives the four 2-D arrays x, y, z, t
x, y, z, t = np.zeros((4, max_no_triangles, 3), dtype=dep.dtype)
# first array created will be for first (0th) triangle
k = 0
# loop through each j-line of T-points ...
# note range(m,n) = (m, ....n-1)
for j in range(1, ny-1):
# get triangles for all i-points on j-line
k = get_triangles_j(j, nx, k, t_land, ds_max, lambda_f, phi_f, dep, x, y, z, t)
# k is now total no of triangles; chop off unused parts of the arrays & copy ...
x, y, z, t = [a[:k,:].copy() for a in (x, y, z, t)]
return k, x, y, z, t
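# Hedged usage sketch (comment only, values invented; shapes follow from the
# code above): for an ny x nx depth array the returned vertex arrays are
# (k, 3), with k bounded by (ny-2)*(nx-2)*3*2 + 2*(ny-2+nx-2):
#
#     dep = np.zeros((4, 4)); dep[1:3, 1:3] = -1000.
#     lam, phi = np.meshgrid(np.arange(4.), np.arange(4.))
#     k, x, y, z, t = get_triangles(dep, lam, phi, t_land=np.nan, ds_max=1e9)
#     # x.shape == y.shape == z.shape == t.shape == (k, 3)
#
# Only the interior (ny-2)*(nx-2) T-points are meshed; the outer ring of dep is
# read only as neighbouring depths for the vertical faces.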
def wrap_lon(lon):
"""
Ensures longitude is between -180 & 180. Not really necessary.
"""
# Need [] to ensure lon is changed in-place instead of making new variable
lon[...] = (lon[...] + 180.) % 360. - 180.
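# Worked example (comment only): wrap_lon maps longitudes into [-180, 180) in
# place, e.g. an array holding [190., -200., 45.] becomes [-170., 160., 45.].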
class Topography(object):
def __init__(self, xs=None, xe=None, ys=None, ye=None,
domain_dir='.', bathymetry_file='bathy_meter.nc', coordinate_file='coordinates.nc',
bottom = 6000., cmap='gist_earth', map2d = None, globe = False, zs_rat = 0.1):
# xem1, yem1 = xe - 1, ye - 1
xem1, yem1 = xe, ye
t1 = time.time()
pathname = os.path.join(domain_dir,bathymetry_file)
with Dataset(pathname) as f:
# print(f.variables.keys())
dep = f.variables['Bathymetry'][ys:ye,xs:xe]
pathname = os.path.join(domain_dir,coordinate_file)
if not os.path.exists(pathname):
pathname = os.path.join(domain_dir,'mesh_hgr.nc')
with Dataset(pathname) as f:
# print(f.variables.keys())
lambda_f = f.variables['glamf'][...,ys:ye,xs:xe].squeeze()
phi_f = f.variables['gphif'][...,ys:ye,xs:xe].squeeze()
t1, t0 = time.time(), t1
print('%10.5f s taken to read in data\n' % (t1 - t0) )
if globe:
# Plug the South Pole if the bathymetry doesn't extend far enough
minlat = phi_f[:,0].min()
if minlat > -89.9 and minlat < -75.:
nj,ni = phi_f.shape
nextra = 10
dy_deg = (minlat + 90.)/nextra
lonfill = np.empty((nj+nextra,ni), dtype=lambda_f.dtype)
latfill = np.empty((nj+nextra,ni), dtype=phi_f.dtype)
depfill = np.empty((nj+nextra,ni), dtype=dep.dtype)
lonfill[nextra:,:] = lambda_f
latfill[nextra:,:] = phi_f
depfill[nextra:,:] = dep
lonfill[:nextra,:] = lambda_f[0,:]
# Add new dimension None to 1D y-array so it can be 'Broadcast' over longitude
latfill[:nextra,:] = np.arange(-90,minlat,dy_deg)[:,None]
depfill[:nextra,:] = 0.0
phi_f, lambda_f, dep = latfill, lonfill, depfill
del latfill, lonfill, depfill
# Ellipsoidal earth
self.rsphere_eq, self.rsphere_pol = 6378137.00, 6356752.3142
dist = self.rsphere_eq + self.rsphere_pol
self.proj = self.globe_proj
elif map2d is not None:
wrap_lon(lambda_f)
lambda_f, phi_f = map2d(lambda_f, phi_f)
# need to scale heights/depths for consistency with picture using horizontal axes i & j
dlam = lambda_f.max() - lambda_f.min()
dphi = phi_f.max() - phi_f.min()
dist = np.sqrt(dlam*dlam + dphi*dphi)
self.map2d = map2d
self.proj = self.map_proj
ny, nx = lambda_f.shape
ds_max = 20.*dist/max(ny,nx)
# ... and convert from depths--> heights
# ... and scale depth of saturated colorscale
zscale = zs_rat*dist/6000.
self.zscale = zscale
dep = -zscale*dep.astype(np.float64)
t1, t0 = time.time(), t1
print('%10.5f s taken to scale & convert data to float64\n' % (t1 - t0) )
        # colors of flat steps are associated with their depth;
        # land points are flagged with t_land = NaN so they fall outside the colour
        # mapping altogether and are drawn with the LUT's nan_color set further
        # below, rather than taking one of the 256 ocean colour classes
zs_min = -zscale*bottom
t_land = np.NaN
ntriangles, x, y, z, t = get_triangles(dep, lambda_f, phi_f, t_land, ds_max)
# ntriangles is now total no of triangles; flatten to 1d arrays of vertices of length 3*triangles
x, y, z, t = [a.ravel() for a in (x, y, z, t)]
# where triangles(k,1...3) contains indices relating to kth triangle
triangles = np.arange(3*ntriangles).reshape(ntriangles,3)
t1, t0 = time.time(), t1
print('%10.5f s taken to calculate vertices\n' % (t1 - t0) )
if globe:
z /= zscale
self.proj(x,y,z)
t1, t0 = time.time(), t1
print('%10.5f s taken to transform triangles onto sphere\n' % (t1 - t0) )
#clear some memory; mayavi consumes a lot;)
del dep
fig = mlab.figure(size=(400, 320), bgcolor=(0.16, 0.28, 0.46))
columns = mlab.triangular_mesh(x, y, z, triangles, colormap=cmap, scalars=t, vmin=zs_min)
# Set color for NAN's (e.g. land)
# columns representing RGBA (red, green, blue, alpha) coded with floats from 0. to 1.
# Black
NaN_color = (0.,0.,0.,1.0)
# dark red/brown
        # NaN_color = (0.3,0.05,0.05,1.0)
columns.module_manager.scalar_lut_manager.lut.nan_color = NaN_color
t1, t0 = time.time(), t1
print('%10.5f s taken to setup topography plot\n' % (t1 - t0) )
# need handle to figure & mapping transformation
if platform.system() == "Linux":
# Linux systems return memory in Kbytes
print('peak memory usage is (MB):',
' self:',resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024,
' children:',resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss/1024
,'\n')
else:
            # Assume macOS type (Darwin), which returns ru_maxrss in bytes
print('peak memory usage is (MB):',
' self:',resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/(1024*1024),
' children:',resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss/(1024*1024)
,'\n')
# self.columns = columns
def map_proj(self, x, y, z):
x[...],y[...] = self.map2d(x, y)
z[...] *= self.zscale
def globe_proj(self, x, y, z):
r_eq, dr_pol_eq = self.rsphere_eq, self.rsphere_pol - self.rsphere_eq
rad = np.pi/180.
z[...] = z*self.zscale + r_eq
y *= rad
x *= rad
xt = z*np.cos(y)
z[...] = (z + dr_pol_eq)*np.sin(y)
y[...] = xt*np.sin(x)
x[...] = xt*np.cos(x)
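# Illustrative note (comment only, not part of the original script): globe_proj
# rewrites the vertex arrays in place from (lon [deg], lat [deg], height [m])
# to Cartesian coordinates on an approximately ellipsoidal Earth:
#
#     r      = r_eq + zscale * height
#     x_cart = r * cos(lat) * cos(lon)
#     y_cart = r * cos(lat) * sin(lon)
#     z_cart = (r + r_pol - r_eq) * sin(lat)
#
# i.e. the vertical axis uses the polar radius (flattened sphere) and the
# exaggerated topography is added radially.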
if __name__ == '__main__':
parser = ArgumentParser(description='produce lego-block topography e.g. \n python ~/VC_code/NEMOcode/lego5.py -b 0 10000 600 10000 -d ../025')
parser.add_argument('-b',dest='bounds',help='ilo (f or u) ihi jlo (f or v) jhi', type=int,
nargs= '*',default=None)
    parser.add_argument('--ilo',dest='ilo',help='ilo; overrides value in bounds', type=int, default=None)
parser.add_argument('--jlo',dest='jlo',help='jlo of southern f (or v) -point bdry', type=int, default=None)
    parser.add_argument('--ihi',dest='ihi',help='ihi; overrides value in bounds', type=int, default=None)
    parser.add_argument('--jhi',dest='jhi',help='jhi; overrides value in bounds', type=int, default=None)
parser.add_argument('--bathy',dest='bathymetry_file',help='bathymetry file if not bathy_meter.nc',
default='bathy_meter.nc')
parser.add_argument('--coords',dest='coordinate_file',help='coordinate file if not coordinates.nc or mesh_hgr.nc',
default='coordinates.nc')
parser.add_argument('--domain','-d',dest='domain_dir',help='directory of bathymetry & coordinates',
default='./')
parser.add_argument('--bottom',dest='bottom',type=float,
help='(positive) depth where colorscale saturates to deepest value',
default=6000.)
parser.add_argument('--globe','-g', dest='globe',action='store_true',
help='do globe', default=False)
args = parser.parse_args()
if args.bounds is None:
xs, xe = None, None
ys, ye = None, None
else:
xs, xe = args.bounds[:2]
ys, ye = args.bounds[2:]
if args.ilo is not None:
xs = args.ilo
if args.jlo is not None:
ys = args.jlo
if args.ihi is not None:
xe = args.ihi
if args.jhi is not None:
ye = args.jhi
if args.globe:
map = None
else:
# Use a basemap projection; see http://matplotlib.org/basemap/users/mapsetup.html
# Lambert conformal
# m = Basemap(llcrnrlon=-95.,llcrnrlat=1.,urcrnrlon=80.,urcrnrlat=80.,\
# rsphere=(6378137.00,6356752.3142),\
# resolution='l',area_thresh=1000.,projection='lcc',\
# lat_1=50.,lon_0=-35.)
# Orthographic (still won't work)
# map = Basemap(projection='ortho',lat_0=50.,lon_0=-35.)
# Mollweide
# map = Basemap(projection='moll',lon_0=0,resolution='c')
# N Polar stereographic
map = Basemap(projection='npstere',boundinglat=10,lon_0=270,resolution='l')
topo = Topography(xs=xs, xe=xe, ys=ys, ye=ye,
domain_dir=args.domain_dir, bathymetry_file=args.bathymetry_file,
coordinate_file= args.coordinate_file,
bottom = args.bottom, map2d = map, globe = args.globe)
mlab.show()
| NEMO-NOC/NEMOsphere | lego5.py | Python | gpl-2.0 | 14,256 | 0.012556 |
# stdlib
import re
import traceback
from contextlib import closing, contextmanager
from collections import defaultdict
# 3p
import pymysql
try:
import psutil
PSUTIL_AVAILABLE = True
except ImportError:
PSUTIL_AVAILABLE = False
# project
from config import _is_affirmative
from checks import AgentCheck
GAUGE = "gauge"
RATE = "rate"
COUNT = "count"
MONOTONIC = "monotonic_count"
# Vars found in "SHOW STATUS;"
STATUS_VARS = {
# Command Metrics
'Slow_queries': ('mysql.performance.slow_queries', RATE),
'Questions': ('mysql.performance.questions', RATE),
'Queries': ('mysql.performance.queries', RATE),
'Com_select': ('mysql.performance.com_select', RATE),
'Com_insert': ('mysql.performance.com_insert', RATE),
'Com_update': ('mysql.performance.com_update', RATE),
'Com_delete': ('mysql.performance.com_delete', RATE),
'Com_replace': ('mysql.performance.com_replace', RATE),
'Com_load': ('mysql.performance.com_load', RATE),
'Com_insert_select': ('mysql.performance.com_insert_select', RATE),
'Com_update_multi': ('mysql.performance.com_update_multi', RATE),
'Com_delete_multi': ('mysql.performance.com_delete_multi', RATE),
'Com_replace_select': ('mysql.performance.com_replace_select', RATE),
# Connection Metrics
'Connections': ('mysql.net.connections', RATE),
'Max_used_connections': ('mysql.net.max_connections', GAUGE),
'Aborted_clients': ('mysql.net.aborted_clients', RATE),
'Aborted_connects': ('mysql.net.aborted_connects', RATE),
# Table Cache Metrics
'Open_files': ('mysql.performance.open_files', GAUGE),
'Open_tables': ('mysql.performance.open_tables', GAUGE),
# Network Metrics
'Bytes_sent': ('mysql.performance.bytes_sent', RATE),
'Bytes_received': ('mysql.performance.bytes_received', RATE),
# Query Cache Metrics
'Qcache_hits': ('mysql.performance.qcache_hits', RATE),
'Qcache_inserts': ('mysql.performance.qcache_inserts', RATE),
'Qcache_lowmem_prunes': ('mysql.performance.qcache_lowmem_prunes', RATE),
# Table Lock Metrics
'Table_locks_waited': ('mysql.performance.table_locks_waited', GAUGE),
'Table_locks_waited_rate': ('mysql.performance.table_locks_waited.rate', RATE),
# Temporary Table Metrics
'Created_tmp_tables': ('mysql.performance.created_tmp_tables', RATE),
'Created_tmp_disk_tables': ('mysql.performance.created_tmp_disk_tables', RATE),
'Created_tmp_files': ('mysql.performance.created_tmp_files', RATE),
# Thread Metrics
'Threads_connected': ('mysql.performance.threads_connected', GAUGE),
'Threads_running': ('mysql.performance.threads_running', GAUGE),
# MyISAM Metrics
'Key_buffer_bytes_unflushed': ('mysql.myisam.key_buffer_bytes_unflushed', GAUGE),
'Key_buffer_bytes_used': ('mysql.myisam.key_buffer_bytes_used', GAUGE),
'Key_read_requests': ('mysql.myisam.key_read_requests', RATE),
'Key_reads': ('mysql.myisam.key_reads', RATE),
'Key_write_requests': ('mysql.myisam.key_write_requests', RATE),
'Key_writes': ('mysql.myisam.key_writes', RATE),
}
# Possibly from SHOW GLOBAL VARIABLES
VARIABLES_VARS = {
'Key_buffer_size': ('mysql.myisam.key_buffer_size', GAUGE),
'Key_cache_utilization': ('mysql.performance.key_cache_utilization', GAUGE),
'max_connections': ('mysql.net.max_connections_available', GAUGE),
'query_cache_size': ('mysql.performance.qcache_size', GAUGE),
'table_open_cache': ('mysql.performance.table_open_cache', GAUGE),
'thread_cache_size': ('mysql.performance.thread_cache_size', GAUGE)
}
INNODB_VARS = {
# InnoDB metrics
'Innodb_data_reads': ('mysql.innodb.data_reads', RATE),
'Innodb_data_writes': ('mysql.innodb.data_writes', RATE),
'Innodb_os_log_fsyncs': ('mysql.innodb.os_log_fsyncs', RATE),
'Innodb_mutex_spin_waits': ('mysql.innodb.mutex_spin_waits', RATE),
'Innodb_mutex_spin_rounds': ('mysql.innodb.mutex_spin_rounds', RATE),
'Innodb_mutex_os_waits': ('mysql.innodb.mutex_os_waits', RATE),
'Innodb_row_lock_waits': ('mysql.innodb.row_lock_waits', RATE),
'Innodb_row_lock_time': ('mysql.innodb.row_lock_time', RATE),
'Innodb_row_lock_current_waits': ('mysql.innodb.row_lock_current_waits', GAUGE),
'Innodb_current_row_locks': ('mysql.innodb.current_row_locks', GAUGE),
'Innodb_buffer_pool_bytes_dirty': ('mysql.innodb.buffer_pool_dirty', GAUGE),
'Innodb_buffer_pool_bytes_free': ('mysql.innodb.buffer_pool_free', GAUGE),
'Innodb_buffer_pool_bytes_used': ('mysql.innodb.buffer_pool_used', GAUGE),
'Innodb_buffer_pool_bytes_total': ('mysql.innodb.buffer_pool_total', GAUGE),
'Innodb_buffer_pool_read_requests': ('mysql.innodb.buffer_pool_read_requests', RATE),
'Innodb_buffer_pool_reads': ('mysql.innodb.buffer_pool_reads', RATE),
'Innodb_buffer_pool_pages_utilization': ('mysql.innodb.buffer_pool_utilization', GAUGE),
}
# Calculated from "SHOW MASTER LOGS;"
BINLOG_VARS = {
'Binlog_space_usage_bytes': ('mysql.binlog.disk_use', GAUGE),
}
# Additional Vars found in "SHOW STATUS;"
# Will collect if [FLAG NAME] is True
OPTIONAL_STATUS_VARS = {
'Binlog_cache_disk_use': ('mysql.binlog.cache_disk_use', GAUGE),
'Binlog_cache_use': ('mysql.binlog.cache_use', GAUGE),
'Handler_commit': ('mysql.performance.handler_commit', RATE),
'Handler_delete': ('mysql.performance.handler_delete', RATE),
'Handler_prepare': ('mysql.performance.handler_prepare', RATE),
'Handler_read_first': ('mysql.performance.handler_read_first', RATE),
'Handler_read_key': ('mysql.performance.handler_read_key', RATE),
'Handler_read_next': ('mysql.performance.handler_read_next', RATE),
'Handler_read_prev': ('mysql.performance.handler_read_prev', RATE),
'Handler_read_rnd': ('mysql.performance.handler_read_rnd', RATE),
'Handler_read_rnd_next': ('mysql.performance.handler_read_rnd_next', RATE),
'Handler_rollback': ('mysql.performance.handler_rollback', RATE),
'Handler_update': ('mysql.performance.handler_update', RATE),
'Handler_write': ('mysql.performance.handler_write', RATE),
'Opened_tables': ('mysql.performance.opened_tables', RATE),
'Qcache_total_blocks': ('mysql.performance.qcache_total_blocks', GAUGE),
'Qcache_free_blocks': ('mysql.performance.qcache_free_blocks', GAUGE),
'Qcache_free_memory': ('mysql.performance.qcache_free_memory', GAUGE),
'Qcache_not_cached': ('mysql.performance.qcache_not_cached', RATE),
'Qcache_queries_in_cache': ('mysql.performance.qcache_queries_in_cache', GAUGE),
'Select_full_join': ('mysql.performance.select_full_join', RATE),
'Select_full_range_join': ('mysql.performance.select_full_range_join', RATE),
'Select_range': ('mysql.performance.select_range', RATE),
'Select_range_check': ('mysql.performance.select_range_check', RATE),
'Select_scan': ('mysql.performance.select_scan', RATE),
'Sort_merge_passes': ('mysql.performance.sort_merge_passes', RATE),
'Sort_range': ('mysql.performance.sort_range', RATE),
'Sort_rows': ('mysql.performance.sort_rows', RATE),
'Sort_scan': ('mysql.performance.sort_scan', RATE),
'Table_locks_immediate': ('mysql.performance.table_locks_immediate', GAUGE),
'Table_locks_immediate_rate': ('mysql.performance.table_locks_immediate.rate', RATE),
'Threads_cached': ('mysql.performance.threads_cached', GAUGE),
'Threads_created': ('mysql.performance.threads_created', MONOTONIC)
}
# Status Vars added in Mysql 5.6.6
OPTIONAL_STATUS_VARS_5_6_6 = {
'Table_open_cache_hits': ('mysql.performance.table_cache_hits', RATE),
'Table_open_cache_misses': ('mysql.performance.table_cache_misses', RATE),
}
# Will collect if [extra_innodb_metrics] is True
OPTIONAL_INNODB_VARS = {
'Innodb_active_transactions': ('mysql.innodb.active_transactions', GAUGE),
'Innodb_buffer_pool_bytes_data': ('mysql.innodb.buffer_pool_data', GAUGE),
'Innodb_buffer_pool_pages_data': ('mysql.innodb.buffer_pool_pages_data', GAUGE),
'Innodb_buffer_pool_pages_dirty': ('mysql.innodb.buffer_pool_pages_dirty', GAUGE),
'Innodb_buffer_pool_pages_flushed': ('mysql.innodb.buffer_pool_pages_flushed', RATE),
'Innodb_buffer_pool_pages_free': ('mysql.innodb.buffer_pool_pages_free', GAUGE),
'Innodb_buffer_pool_pages_total': ('mysql.innodb.buffer_pool_pages_total', GAUGE),
'Innodb_buffer_pool_read_ahead': ('mysql.innodb.buffer_pool_read_ahead', RATE),
'Innodb_buffer_pool_read_ahead_evicted': ('mysql.innodb.buffer_pool_read_ahead_evicted', RATE),
'Innodb_buffer_pool_read_ahead_rnd': ('mysql.innodb.buffer_pool_read_ahead_rnd', GAUGE),
'Innodb_buffer_pool_wait_free': ('mysql.innodb.buffer_pool_wait_free', MONOTONIC),
'Innodb_buffer_pool_write_requests': ('mysql.innodb.buffer_pool_write_requests', RATE),
'Innodb_checkpoint_age': ('mysql.innodb.checkpoint_age', GAUGE),
'Innodb_current_transactions': ('mysql.innodb.current_transactions', GAUGE),
'Innodb_data_fsyncs': ('mysql.innodb.data_fsyncs', RATE),
'Innodb_data_pending_fsyncs': ('mysql.innodb.data_pending_fsyncs', GAUGE),
'Innodb_data_pending_reads': ('mysql.innodb.data_pending_reads', GAUGE),
'Innodb_data_pending_writes': ('mysql.innodb.data_pending_writes', GAUGE),
'Innodb_data_read': ('mysql.innodb.data_read', RATE),
'Innodb_data_written': ('mysql.innodb.data_written', RATE),
'Innodb_dblwr_pages_written': ('mysql.innodb.dblwr_pages_written', RATE),
'Innodb_dblwr_writes': ('mysql.innodb.dblwr_writes', RATE),
'Innodb_hash_index_cells_total': ('mysql.innodb.hash_index_cells_total', GAUGE),
'Innodb_hash_index_cells_used': ('mysql.innodb.hash_index_cells_used', GAUGE),
'Innodb_history_list_length': ('mysql.innodb.history_list_length', GAUGE),
'Innodb_ibuf_free_list': ('mysql.innodb.ibuf_free_list', GAUGE),
'Innodb_ibuf_merged': ('mysql.innodb.ibuf_merged', RATE),
'Innodb_ibuf_merged_delete_marks': ('mysql.innodb.ibuf_merged_delete_marks', RATE),
'Innodb_ibuf_merged_deletes': ('mysql.innodb.ibuf_merged_deletes', RATE),
'Innodb_ibuf_merged_inserts': ('mysql.innodb.ibuf_merged_inserts', RATE),
'Innodb_ibuf_merges': ('mysql.innodb.ibuf_merges', RATE),
'Innodb_ibuf_segment_size': ('mysql.innodb.ibuf_segment_size', GAUGE),
'Innodb_ibuf_size': ('mysql.innodb.ibuf_size', GAUGE),
'Innodb_lock_structs': ('mysql.innodb.lock_structs', RATE),
'Innodb_locked_tables': ('mysql.innodb.locked_tables', GAUGE),
'Innodb_locked_transactions': ('mysql.innodb.locked_transactions', GAUGE),
'Innodb_log_waits': ('mysql.innodb.log_waits', RATE),
'Innodb_log_write_requests': ('mysql.innodb.log_write_requests', RATE),
'Innodb_log_writes': ('mysql.innodb.log_writes', RATE),
'Innodb_lsn_current': ('mysql.innodb.lsn_current', RATE),
'Innodb_lsn_flushed': ('mysql.innodb.lsn_flushed', RATE),
'Innodb_lsn_last_checkpoint': ('mysql.innodb.lsn_last_checkpoint', RATE),
'Innodb_mem_adaptive_hash': ('mysql.innodb.mem_adaptive_hash', GAUGE),
'Innodb_mem_additional_pool': ('mysql.innodb.mem_additional_pool', GAUGE),
'Innodb_mem_dictionary': ('mysql.innodb.mem_dictionary', GAUGE),
'Innodb_mem_file_system': ('mysql.innodb.mem_file_system', GAUGE),
'Innodb_mem_lock_system': ('mysql.innodb.mem_lock_system', GAUGE),
'Innodb_mem_page_hash': ('mysql.innodb.mem_page_hash', GAUGE),
'Innodb_mem_recovery_system': ('mysql.innodb.mem_recovery_system', GAUGE),
'Innodb_mem_thread_hash': ('mysql.innodb.mem_thread_hash', GAUGE),
'Innodb_mem_total': ('mysql.innodb.mem_total', GAUGE),
'Innodb_os_file_fsyncs': ('mysql.innodb.os_file_fsyncs', RATE),
'Innodb_os_file_reads': ('mysql.innodb.os_file_reads', RATE),
'Innodb_os_file_writes': ('mysql.innodb.os_file_writes', RATE),
'Innodb_os_log_pending_fsyncs': ('mysql.innodb.os_log_pending_fsyncs', GAUGE),
'Innodb_os_log_pending_writes': ('mysql.innodb.os_log_pending_writes', GAUGE),
'Innodb_os_log_written': ('mysql.innodb.os_log_written', RATE),
'Innodb_pages_created': ('mysql.innodb.pages_created', RATE),
'Innodb_pages_read': ('mysql.innodb.pages_read', RATE),
'Innodb_pages_written': ('mysql.innodb.pages_written', RATE),
'Innodb_pending_aio_log_ios': ('mysql.innodb.pending_aio_log_ios', GAUGE),
'Innodb_pending_aio_sync_ios': ('mysql.innodb.pending_aio_sync_ios', GAUGE),
'Innodb_pending_buffer_pool_flushes': ('mysql.innodb.pending_buffer_pool_flushes', GAUGE),
'Innodb_pending_checkpoint_writes': ('mysql.innodb.pending_checkpoint_writes', GAUGE),
'Innodb_pending_ibuf_aio_reads': ('mysql.innodb.pending_ibuf_aio_reads', GAUGE),
'Innodb_pending_log_flushes': ('mysql.innodb.pending_log_flushes', GAUGE),
'Innodb_pending_log_writes': ('mysql.innodb.pending_log_writes', GAUGE),
'Innodb_pending_normal_aio_reads': ('mysql.innodb.pending_normal_aio_reads', GAUGE),
'Innodb_pending_normal_aio_writes': ('mysql.innodb.pending_normal_aio_writes', GAUGE),
'Innodb_queries_inside': ('mysql.innodb.queries_inside', GAUGE),
'Innodb_queries_queued': ('mysql.innodb.queries_queued', GAUGE),
'Innodb_read_views': ('mysql.innodb.read_views', GAUGE),
'Innodb_rows_deleted': ('mysql.innodb.rows_deleted', RATE),
'Innodb_rows_inserted': ('mysql.innodb.rows_inserted', RATE),
'Innodb_rows_read': ('mysql.innodb.rows_read', RATE),
'Innodb_rows_updated': ('mysql.innodb.rows_updated', RATE),
'Innodb_s_lock_os_waits': ('mysql.innodb.s_lock_os_waits', RATE),
'Innodb_s_lock_spin_rounds': ('mysql.innodb.s_lock_spin_rounds', RATE),
'Innodb_s_lock_spin_waits': ('mysql.innodb.s_lock_spin_waits', RATE),
'Innodb_semaphore_wait_time': ('mysql.innodb.semaphore_wait_time', GAUGE),
'Innodb_semaphore_waits': ('mysql.innodb.semaphore_waits', GAUGE),
'Innodb_tables_in_use': ('mysql.innodb.tables_in_use', GAUGE),
'Innodb_x_lock_os_waits': ('mysql.innodb.x_lock_os_waits', RATE),
'Innodb_x_lock_spin_rounds': ('mysql.innodb.x_lock_spin_rounds', RATE),
'Innodb_x_lock_spin_waits': ('mysql.innodb.x_lock_spin_waits', RATE),
}
GALERA_VARS = {
'wsrep_cluster_size': ('mysql.galera.wsrep_cluster_size', GAUGE),
'wsrep_local_recv_queue_avg': ('mysql.galera.wsrep_local_recv_queue_avg', GAUGE),
'wsrep_flow_control_paused': ('mysql.galera.wsrep_flow_control_paused', GAUGE),
'wsrep_cert_deps_distance': ('mysql.galera.wsrep_cert_deps_distance', GAUGE),
'wsrep_local_send_queue_avg': ('mysql.galera.wsrep_local_send_queue_avg', GAUGE),
}
PERFORMANCE_VARS = {
'query_run_time_avg': ('mysql.performance.query_run_time.avg', GAUGE),
'perf_digest_95th_percentile_avg_us': ('mysql.performance.digest_95th_percentile.avg_us', GAUGE),
}
SCHEMA_VARS = {
'information_schema_size': ('mysql.info.schema.size', GAUGE),
}
REPLICA_VARS = {
'Seconds_Behind_Master': ('mysql.replication.seconds_behind_master', GAUGE),
'Slaves_connected': ('mysql.replication.slaves_connected', COUNT),
}
SYNTHETIC_VARS = {
'Qcache_utilization': ('mysql.performance.qcache.utilization', GAUGE),
'Qcache_instant_utilization': ('mysql.performance.qcache.utilization.instant', GAUGE),
}
class MySql(AgentCheck):
SERVICE_CHECK_NAME = 'mysql.can_connect'
SLAVE_SERVICE_CHECK_NAME = 'mysql.replication.slave_running'
MAX_CUSTOM_QUERIES = 20
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.mysql_version = {}
self.qcache_stats = {}
def get_library_versions(self):
return {"pymysql": pymysql.__version__}
def check(self, instance):
host, port, user, password, mysql_sock, defaults_file, tags, options, queries, ssl = \
self._get_config(instance)
self._set_qcache_stats()
if (not host or not user) and not defaults_file:
raise Exception("Mysql host and user are needed.")
with self._connect(host, port, mysql_sock, user,
password, defaults_file, ssl) as db:
try:
# Metadata collection
self._collect_metadata(db, host)
# Metric collection
self._collect_metrics(host, db, tags, options, queries)
self._collect_system_metrics(host, db, tags)
# keeping track of these:
self._put_qcache_stats()
except Exception as e:
self.log.exception("error!")
raise e
def _get_config(self, instance):
self.host = instance.get('server', '')
self.port = int(instance.get('port', 0))
self.mysql_sock = instance.get('sock', '')
self.defaults_file = instance.get('defaults_file', '')
user = instance.get('user', '')
password = instance.get('pass', '')
tags = instance.get('tags', [])
options = instance.get('options', {})
queries = instance.get('queries', [])
ssl = instance.get('ssl', {})
return (self.host, self.port, user, password, self.mysql_sock,
self.defaults_file, tags, options, queries, ssl)
def _set_qcache_stats(self):
host_key = self._get_host_key()
qcache_st = self.qcache_stats.get(host_key, (None, None, None))
self._qcache_hits = qcache_st[0]
self._qcache_inserts = qcache_st[1]
self._qcache_not_cached = qcache_st[2]
def _put_qcache_stats(self):
host_key = self._get_host_key()
self.qcache_stats[host_key] = (
self._qcache_hits,
self._qcache_inserts,
self._qcache_not_cached
)
def _get_host_key(self):
if self.defaults_file:
return self.defaults_file
hostkey = self.host
if self.mysql_sock:
hostkey = "{0}:{1}".format(hostkey, self.mysql_sock)
elif self.port:
hostkey = "{0}:{1}".format(hostkey, self.port)
return hostkey
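    # Illustrative examples (comment only, values invented): server 'db1' with
    # port 3306 keys the version/qcache caches as 'db1:3306'; the same server
    # with sock '/var/run/mysqld/mysqld.sock' keys them as
    # 'db1:/var/run/mysqld/mysqld.sock'; a defaults_file path is used verbatim.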
@contextmanager
def _connect(self, host, port, mysql_sock, user, password, defaults_file, ssl):
self.service_check_tags = [
'server:%s' % (mysql_sock if mysql_sock != '' else host),
'port:%s' % ('unix_socket' if port == 0 else port)
]
db = None
try:
ssl = dict(ssl) if ssl else None
if defaults_file != '':
db = pymysql.connect(read_default_file=defaults_file, ssl=ssl)
elif mysql_sock != '':
self.service_check_tags = [
'server:{0}'.format(mysql_sock),
'port:unix_socket'
]
db = pymysql.connect(
unix_socket=mysql_sock,
user=user,
passwd=password
)
elif port:
db = pymysql.connect(
host=host,
port=port,
user=user,
passwd=password,
ssl=ssl
)
else:
db = pymysql.connect(
host=host,
user=user,
passwd=password,
ssl=ssl
)
self.log.debug("Connected to MySQL")
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=self.service_check_tags)
yield db
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=self.service_check_tags)
raise
finally:
if db:
db.close()
def _collect_metrics(self, host, db, tags, options, queries):
# Get aggregate of all VARS we want to collect
metrics = STATUS_VARS
# collect results from db
results = self._get_stats_from_status(db)
results.update(self._get_stats_from_variables(db))
if self._is_innodb_engine_enabled(db):
results.update(self._get_stats_from_innodb_status(db))
innodb_keys = [
'Innodb_page_size',
'Innodb_buffer_pool_pages_data',
'Innodb_buffer_pool_pages_dirty',
'Innodb_buffer_pool_pages_total',
'Innodb_buffer_pool_pages_free',
]
for inno_k in innodb_keys:
results[inno_k] = self._collect_scalar(inno_k, results)
try:
innodb_page_size = results['Innodb_page_size']
innodb_buffer_pool_pages_used = results['Innodb_buffer_pool_pages_total'] - \
results['Innodb_buffer_pool_pages_free']
if 'Innodb_buffer_pool_bytes_data' not in results:
results[
'Innodb_buffer_pool_bytes_data'] = results['Innodb_buffer_pool_pages_data'] * innodb_page_size
if 'Innodb_buffer_pool_bytes_dirty' not in results:
results[
'Innodb_buffer_pool_bytes_dirty'] = results['Innodb_buffer_pool_pages_dirty'] * innodb_page_size
if 'Innodb_buffer_pool_bytes_free' not in results:
results[
'Innodb_buffer_pool_bytes_free'] = results['Innodb_buffer_pool_pages_free'] * innodb_page_size
if 'Innodb_buffer_pool_bytes_total' not in results:
results[
'Innodb_buffer_pool_bytes_total'] = results['Innodb_buffer_pool_pages_total'] * innodb_page_size
if 'Innodb_buffer_pool_pages_utilization' not in results:
results['Innodb_buffer_pool_pages_utilization'] = innodb_buffer_pool_pages_used / \
results['Innodb_buffer_pool_pages_total']
if 'Innodb_buffer_pool_bytes_used' not in results:
results[
'Innodb_buffer_pool_bytes_used'] = innodb_buffer_pool_pages_used * innodb_page_size
except (KeyError, TypeError) as e:
self.log.error("Not all InnoDB buffer pool metrics are available, unable to compute: {0}".format(e))
if _is_affirmative(options.get('extra_innodb_metrics', False)):
self.log.debug("Collecting Extra Innodb Metrics")
metrics.update(OPTIONAL_INNODB_VARS)
# Binary log statistics
if self._get_variable_enabled(results, 'log_bin'):
results[
'Binlog_space_usage_bytes'] = self._get_binary_log_stats(db)
# Compute key cache utilization metric
key_blocks_unused = self._collect_scalar('Key_blocks_unused', results)
key_cache_block_size = self._collect_scalar('key_cache_block_size', results)
key_buffer_size = self._collect_scalar('key_buffer_size', results)
results['Key_buffer_size'] = key_buffer_size
try:
key_cache_utilization = 1 - ((key_blocks_unused * key_cache_block_size) / key_buffer_size)
results['Key_buffer_bytes_used'] = self._collect_scalar(
'Key_blocks_used', results) * key_cache_block_size
results['Key_buffer_bytes_unflushed'] = self._collect_scalar(
'Key_blocks_not_flushed', results) * key_cache_block_size
results['Key_cache_utilization'] = key_cache_utilization
except TypeError as e:
self.log.error("Not all Key metrics are available, unable to compute: {0}".format(e))
metrics.update(VARIABLES_VARS)
metrics.update(INNODB_VARS)
metrics.update(BINLOG_VARS)
if _is_affirmative(options.get('extra_status_metrics', False)):
self.log.debug("Collecting Extra Status Metrics")
metrics.update(OPTIONAL_STATUS_VARS)
if self._version_compatible(db, host, "5.6.6"):
metrics.update(OPTIONAL_STATUS_VARS_5_6_6)
if _is_affirmative(options.get('galera_cluster', False)):
# already in result-set after 'SHOW STATUS' just add vars to collect
self.log.debug("Collecting Galera Metrics.")
metrics.update(GALERA_VARS)
performance_schema_enabled = self._get_variable_enabled(results, 'performance_schema')
if _is_affirmative(options.get('extra_performance_metrics', False)) and \
self._version_compatible(db, host, "5.6.0") and \
performance_schema_enabled:
# report avg query response time per schema to Datadog
results['perf_digest_95th_percentile_avg_us'] = self._get_query_exec_time_95th_us(db)
results['query_run_time_avg'] = self._query_exec_time_per_schema(db)
metrics.update(PERFORMANCE_VARS)
if _is_affirmative(options.get('schema_size_metrics', False)):
# report avg query response time per schema to Datadog
results['information_schema_size'] = self._query_size_per_schema(db)
metrics.update(SCHEMA_VARS)
if _is_affirmative(options.get('replication', False)):
# Get replica stats
results.update(self._get_replica_stats(db))
results.update(self._get_slave_status(db, performance_schema_enabled))
metrics.update(REPLICA_VARS)
            # get slave running from global status page
slave_running_status = AgentCheck.UNKNOWN
slave_running = self._collect_string('Slave_running', results)
binlog_running = results.get('Binlog_enabled', False)
            # slaves will only be collected if the user has PROCESS privileges.
slaves = self._collect_scalar('Slaves_connected', results)
if slave_running is not None:
if slave_running.lower().strip() == 'on':
slave_running_status = AgentCheck.OK
else:
slave_running_status = AgentCheck.CRITICAL
elif slaves or binlog_running:
if slaves and slaves > 0 and binlog_running:
slave_running_status = AgentCheck.OK
else:
slave_running_status = AgentCheck.WARNING
else:
# MySQL 5.7.x might not have 'Slave_running'. See: https://bugs.mysql.com/bug.php?id=78544
# look at replica vars collected at the top of if-block
if self._version_compatible(db, host, "5.7.0"):
slave_io_running = self._collect_string('Slave_IO_Running', results)
slave_sql_running = self._collect_string('Slave_SQL_Running', results)
if slave_io_running:
slave_io_running = (slave_io_running.lower().strip() == "yes")
if slave_sql_running:
slave_sql_running = (slave_sql_running.lower().strip() == "yes")
if not (slave_io_running is None and slave_sql_running is None):
if slave_io_running and slave_sql_running:
slave_running_status = AgentCheck.OK
elif not slave_io_running and not slave_sql_running:
slave_running_status = AgentCheck.CRITICAL
else:
# not everything is running smoothly
slave_running_status = AgentCheck.WARNING
# deprecated in favor of service_check("mysql.replication.slave_running")
self.gauge(self.SLAVE_SERVICE_CHECK_NAME, (1 if slave_running_status == AgentCheck.OK else 0), tags=tags)
self.service_check(self.SLAVE_SERVICE_CHECK_NAME, slave_running_status, tags=self.service_check_tags)
# "synthetic" metrics
metrics.update(SYNTHETIC_VARS)
self._compute_synthetic_results(results)
# remove uncomputed metrics
for k in SYNTHETIC_VARS:
if k not in results:
metrics.pop(k, None)
# add duped metrics - reporting some as both rate and gauge
dupes = [('Table_locks_waited', 'Table_locks_waited_rate'),
('Table_locks_immediate', 'Table_locks_immediate_rate')]
for src, dst in dupes:
if src in results:
results[dst] = results[src]
self._submit_metrics(metrics, results, tags)
# Collect custom query metrics
# Max of 20 queries allowed
if isinstance(queries, list):
for index, check in enumerate(queries[:self.MAX_CUSTOM_QUERIES]):
total_tags = tags + check.get('tags', [])
self._collect_dict(check['type'], {check['field']: check['metric']}, check['query'], db, tags=total_tags)
if len(queries) > self.MAX_CUSTOM_QUERIES:
self.warning("Maximum number (%s) of custom queries reached. Skipping the rest."
% self.MAX_CUSTOM_QUERIES)
def _collect_metadata(self, db, host):
version = self._get_version(db, host)
self.service_metadata('version', ".".join(version))
def _submit_metrics(self, variables, dbResults, tags):
for variable, metric in variables.iteritems():
metric_name, metric_type = metric
for tag, value in self._collect_all_scalars(variable, dbResults):
metric_tags = list(tags)
if tag:
metric_tags.append(tag)
if value is not None:
if metric_type == RATE:
self.rate(metric_name, value, tags=metric_tags)
elif metric_type == GAUGE:
self.gauge(metric_name, value, tags=metric_tags)
elif metric_type == COUNT:
self.count(metric_name, value, tags=metric_tags)
elif metric_type == MONOTONIC:
self.monotonic_count(metric_name, value, tags=metric_tags)
def _version_compatible(self, db, host, compat_version):
# some patch version numbers contain letters (e.g. 5.0.51a)
# so let's be careful when we compute the version number
try:
mysql_version = self._get_version(db, host)
except Exception, e:
self.warning("Cannot compute mysql version, assuming it's older.: %s"
% str(e))
return False
self.log.debug("MySQL version %s" % mysql_version)
        patchlevel = int(re.match(r"([0-9]+)", mysql_version[2]).group(1))
        version = (int(mysql_version[0]), int(mysql_version[1]), patchlevel)
        # compare as tuples of ints: comparing the tuple against the raw
        # "x.y.z" string would always evaluate True under Python 2
        compat_version = tuple(int(part) for part in compat_version.split("."))
        return version > compat_version
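    # Illustrative note (comment only, not part of the original check): a server
    # reporting '5.6.34-log' parses to version == (5, 6, 34) above, the compat
    # string '5.6.6' becomes (5, 6, 6), and (5, 6, 34) > (5, 6, 6) correctly
    # treats 5.6.34 as newer, while e.g. (5, 6, 2) > (5, 6, 6) is False.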
def _get_version(self, db, host):
hostkey = self._get_host_key()
if hostkey in self.mysql_version:
version = self.mysql_version[hostkey]
return version
# Get MySQL version
with closing(db.cursor()) as cursor:
cursor.execute('SELECT VERSION()')
result = cursor.fetchone()
# Version might include a description e.g. 4.1.26-log.
# See
# http://dev.mysql.com/doc/refman/4.1/en/information-functions.html#function_version
version = result[0].split('-')
version = version[0].split('.')
self.mysql_version[hostkey] = version
return version
def _collect_all_scalars(self, key, dictionary):
if key not in dictionary or dictionary[key] is None:
yield None, None
elif isinstance(dictionary[key], dict):
for tag, _ in dictionary[key].iteritems():
yield tag, self._collect_type(tag, dictionary[key], float)
else:
yield None, self._collect_type(key, dictionary, float)
def _collect_scalar(self, key, dict):
return self._collect_type(key, dict, float)
def _collect_string(self, key, dict):
return self._collect_type(key, dict, unicode)
def _collect_type(self, key, dict, the_type):
self.log.debug("Collecting data with %s" % key)
if key not in dict:
self.log.debug("%s returned None" % key)
return None
self.log.debug("Collecting done, value %s" % dict[key])
return the_type(dict[key])
def _collect_dict(self, metric_type, field_metric_map, query, db, tags):
"""
Query status and get a dictionary back.
Extract each field out of the dictionary
and stuff it in the corresponding metric.
query: show status...
field_metric_map: {"Seconds_behind_master": "mysqlSecondsBehindMaster"}
"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(query)
result = cursor.fetchone()
if result is not None:
for field in field_metric_map.keys():
# Get the agent metric name from the column name
metric = field_metric_map[field]
# Find the column name in the cursor description to identify the column index
# http://www.python.org/dev/peps/pep-0249/
# cursor.description is a tuple of (column_name, ..., ...)
try:
col_idx = [d[0].lower() for d in cursor.description].index(field.lower())
self.log.debug("Collecting metric: %s" % metric)
if result[col_idx] is not None:
self.log.debug(
"Collecting done, value %s" % result[col_idx])
if metric_type == GAUGE:
self.gauge(metric, float(
result[col_idx]), tags=tags)
elif metric_type == RATE:
self.rate(metric, float(
result[col_idx]), tags=tags)
else:
self.gauge(metric, float(
result[col_idx]), tags=tags)
else:
self.log.debug(
"Received value is None for index %d" % col_idx)
except ValueError:
self.log.exception("Cannot find %s in the columns %s"
% (field, cursor.description))
except Exception:
self.warning("Error while running %s\n%s" %
(query, traceback.format_exc()))
self.log.exception("Error while running %s" % query)
def _collect_system_metrics(self, host, db, tags):
pid = None
# The server needs to run locally, accessed by TCP or socket
if host in ["localhost", "127.0.0.1"] or db.port == long(0):
pid = self._get_server_pid(db)
if pid:
self.log.debug("System metrics for mysql w\ pid: %s" % pid)
# At last, get mysql cpu data out of psutil or procfs
try:
ucpu, scpu = None, None
if PSUTIL_AVAILABLE:
proc = psutil.Process(pid)
ucpu = proc.cpu_times()[0]
scpu = proc.cpu_times()[1]
if ucpu and scpu:
self.rate("mysql.performance.user_time", ucpu, tags=tags)
# should really be system_time
self.rate("mysql.performance.kernel_time", scpu, tags=tags)
self.rate("mysql.performance.cpu_time", ucpu+scpu, tags=tags)
except Exception:
self.warning("Error while reading mysql (pid: %s) procfs data\n%s"
% (pid, traceback.format_exc()))
def _get_server_pid(self, db):
pid = None
# Try to get pid from pid file, it can fail for permission reason
pid_file = None
try:
with closing(db.cursor()) as cursor:
cursor.execute("SHOW VARIABLES LIKE 'pid_file'")
pid_file = cursor.fetchone()[1]
except Exception:
self.warning("Error while fetching pid_file variable of MySQL.")
if pid_file is not None:
self.log.debug("pid file: %s" % str(pid_file))
try:
f = open(pid_file)
pid = int(f.readline())
f.close()
except IOError:
self.log.debug("Cannot read mysql pid file %s" % pid_file)
# If pid has not been found, read it from ps
if pid is None and PSUTIL_AVAILABLE:
try:
for proc in psutil.process_iter():
if proc.name() == "mysqld":
pid = proc.pid
except Exception:
self.log.exception("Error while fetching mysql pid from psutil")
return pid
def _get_stats_from_status(self, db):
with closing(db.cursor()) as cursor:
cursor.execute("SHOW /*!50002 GLOBAL */ STATUS;")
results = dict(cursor.fetchall())
return results
def _get_stats_from_variables(self, db):
with closing(db.cursor()) as cursor:
cursor.execute("SHOW GLOBAL VARIABLES;")
results = dict(cursor.fetchall())
return results
def _get_binary_log_stats(self, db):
try:
with closing(db.cursor()) as cursor:
cursor.execute("SHOW BINARY LOGS;")
master_logs = dict(cursor.fetchall())
binary_log_space = 0
for key, value in master_logs.iteritems():
binary_log_space += value
return binary_log_space
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Privileges error accessing the BINARY LOGS (must grant REPLICATION CLIENT): %s" % str(e))
return None
def _is_innodb_engine_enabled(self, db):
# Whether InnoDB engine is available or not can be found out either
# from the output of SHOW ENGINES or from information_schema.ENGINES
        # table. The latter is chosen because it involves no string parsing.
with closing(db.cursor()) as cursor:
cursor.execute(
"select engine from information_schema.ENGINES where engine='InnoDB'")
return_val = True if cursor.rowcount > 0 else False
return return_val
def _get_replica_stats(self, db):
try:
with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
replica_results = {}
cursor.execute("SHOW SLAVE STATUS;")
slave_results = cursor.fetchone()
if slave_results:
replica_results.update(slave_results)
cursor.execute("SHOW MASTER STATUS;")
binlog_results = cursor.fetchone()
if binlog_results:
replica_results.update({'Binlog_enabled': True})
return replica_results
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Privileges error getting replication status (must grant REPLICATION CLIENT): %s" % str(e))
return {}
def _get_slave_status(self, db, nonblocking=False):
try:
with closing(db.cursor()) as cursor:
# querying threads instead of PROCESSLIST to avoid mutex impact on
# performance.
if nonblocking:
cursor.execute("SELECT THREAD_ID, NAME FROM performance_schema.threads WHERE NAME LIKE '%worker'")
else:
cursor.execute("SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND LIKE '%Binlog dump%'")
slave_results = cursor.fetchall()
slaves = 0
for row in slave_results:
slaves += 1
return {'Slaves_connected': slaves}
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Privileges error accessing the process tables (must grant PROCESS): %s" % str(e))
return {}
def _get_stats_from_innodb_status(self, db):
# There are a number of important InnoDB metrics that are reported in
# InnoDB status but are not otherwise present as part of the STATUS
# variables in MySQL. Majority of these metrics are reported though
# as a part of STATUS variables in Percona Server and MariaDB.
# Requires querying user to have PROCESS privileges.
try:
with closing(db.cursor()) as cursor:
cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
innodb_status = cursor.fetchone()
innodb_status_text = innodb_status[2]
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Privilege error accessing the INNODB status tables (must grant PROCESS): %s" % str(e))
return {}
results = defaultdict(int)
# Here we now parse InnoDB STATUS one line at a time
# This is heavily inspired by the Percona monitoring plugins work
txn_seen = False
prev_line = ''
for line in innodb_status_text.splitlines():
line = line.strip()
row = re.split(" +", line)
row = [item.strip(',') for item in row]
row = [item.strip(';') for item in row]
row = [item.strip('[') for item in row]
row = [item.strip(']') for item in row]
# SEMAPHORES
if line.find('Mutex spin waits') == 0:
# Mutex spin waits 79626940, rounds 157459864, OS waits 698719
# Mutex spin waits 0, rounds 247280272495, OS waits 316513438
results['Innodb_mutex_spin_waits'] = long(row[3])
results['Innodb_mutex_spin_rounds'] = long(row[5])
results['Innodb_mutex_os_waits'] = long(row[8])
elif line.find('RW-shared spins') == 0 and line.find(';') > 0:
# RW-shared spins 3859028, OS waits 2100750; RW-excl spins
# 4641946, OS waits 1530310
results['Innodb_s_lock_spin_waits'] = long(row[2])
results['Innodb_x_lock_spin_waits'] = long(row[8])
results['Innodb_s_lock_os_waits'] = long(row[5])
results['Innodb_x_lock_os_waits'] = long(row[11])
elif line.find('RW-shared spins') == 0 and line.find('; RW-excl spins') == -1:
# Post 5.5.17 SHOW ENGINE INNODB STATUS syntax
# RW-shared spins 604733, rounds 8107431, OS waits 241268
results['Innodb_s_lock_spin_waits'] = long(row[2])
results['Innodb_s_lock_spin_rounds'] = long(row[4])
results['Innodb_s_lock_os_waits'] = long(row[7])
elif line.find('RW-excl spins') == 0:
# Post 5.5.17 SHOW ENGINE INNODB STATUS syntax
# RW-excl spins 604733, rounds 8107431, OS waits 241268
results['Innodb_x_lock_spin_waits'] = long(row[2])
results['Innodb_x_lock_spin_rounds'] = long(row[4])
results['Innodb_x_lock_os_waits'] = long(row[7])
elif line.find('seconds the semaphore:') > 0:
# --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore:
results['Innodb_semaphore_waits'] += 1
results[
'Innodb_semaphore_wait_time'] += long(float(row[9])) * 1000
# TRANSACTIONS
elif line.find('Trx id counter') == 0:
# The beginning of the TRANSACTIONS section: start counting
# transactions
# Trx id counter 0 1170664159
# Trx id counter 861B144C
txn_seen = True
elif line.find('History list length') == 0:
# History list length 132
results['Innodb_history_list_length'] = long(row[3])
elif txn_seen and line.find('---TRANSACTION') == 0:
# ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656
results['Innodb_current_transactions'] += 1
if line.find('ACTIVE') > 0:
results['Innodb_active_transactions'] += 1
elif txn_seen and line.find('------- TRX HAS BEEN') == 0:
# ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED:
results['Innodb_row_lock_time'] += long(row[5]) * 1000
elif line.find('read views open inside InnoDB') > 0:
# 1 read views open inside InnoDB
results['Innodb_read_views'] = long(row[0])
elif line.find('mysql tables in use') == 0:
# mysql tables in use 2, locked 2
results['Innodb_tables_in_use'] += long(row[4])
results['Innodb_locked_tables'] += long(row[6])
elif txn_seen and line.find('lock struct(s)') > 0:
# 23 lock struct(s), heap size 3024, undo log entries 27
# LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5
# LOCK WAIT 2 lock struct(s), heap size 368
if line.find('LOCK WAIT') == 0:
results['Innodb_lock_structs'] += long(row[2])
results['Innodb_locked_transactions'] += 1
elif line.find('ROLLING BACK') == 0:
# ROLLING BACK 127539 lock struct(s), heap size 15201832,
# 4411492 row lock(s), undo log entries 1042488
results['Innodb_lock_structs'] += long(row[2])
else:
results['Innodb_lock_structs'] += long(row[0])
# FILE I/O
elif line.find(' OS file reads, ') > 0:
# 8782182 OS file reads, 15635445 OS file writes, 947800 OS
# fsyncs
results['Innodb_os_file_reads'] = long(row[0])
results['Innodb_os_file_writes'] = long(row[4])
results['Innodb_os_file_fsyncs'] = long(row[8])
elif line.find('Pending normal aio reads:') == 0:
# Pending normal aio reads: 0, aio writes: 0,
# or Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] ,
# or Pending normal aio reads: 0 [0, 0, 0, 0] , aio writes: 0 [0, 0, 0, 0] ,
if len(row) == 16:
results['Innodb_pending_normal_aio_reads'] = (long(row[4]) + long(row[5]) +
long(row[6]) + long(row[7]))
results['Innodb_pending_normal_aio_writes'] = (long(row[11]) + long(row[12]) +
long(row[13]) + long(row[14]))
elif len(row) == 18:
results['Innodb_pending_normal_aio_reads'] = long(row[4])
results['Innodb_pending_normal_aio_writes'] = long(row[12])
else:
results['Innodb_pending_normal_aio_reads'] = long(row[4])
results['Innodb_pending_normal_aio_writes'] = long(row[7])
elif line.find('ibuf aio reads') == 0:
# ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0
# or ibuf aio reads:, log i/o's:, sync i/o's:
if len(row) == 10:
results['Innodb_pending_ibuf_aio_reads'] = long(row[3])
results['Innodb_pending_aio_log_ios'] = long(row[6])
results['Innodb_pending_aio_sync_ios'] = long(row[9])
elif len(row) == 7:
results['Innodb_pending_ibuf_aio_reads'] = 0
results['Innodb_pending_aio_log_ios'] = 0
results['Innodb_pending_aio_sync_ios'] = 0
elif line.find('Pending flushes (fsync)') == 0:
# Pending flushes (fsync) log: 0; buffer pool: 0
results['Innodb_pending_log_flushes'] = long(row[4])
results['Innodb_pending_buffer_pool_flushes'] = long(row[7])
# INSERT BUFFER AND ADAPTIVE HASH INDEX
elif line.find('Ibuf for space 0: size ') == 0:
# Older InnoDB code seemed to be ready for an ibuf per tablespace. It
# had two lines in the output. Newer has just one line, see below.
# Ibuf for space 0: size 1, free list len 887, seg size 889, is not empty
# Ibuf for space 0: size 1, free list len 887, seg size 889,
results['Innodb_ibuf_size'] = long(row[5])
results['Innodb_ibuf_free_list'] = long(row[9])
results['Innodb_ibuf_segment_size'] = long(row[12])
elif line.find('Ibuf: size ') == 0:
# Ibuf: size 1, free list len 4634, seg size 4636,
results['Innodb_ibuf_size'] = long(row[2])
results['Innodb_ibuf_free_list'] = long(row[6])
results['Innodb_ibuf_segment_size'] = long(row[9])
if line.find('merges') > -1:
results['Innodb_ibuf_merges'] = long(row[10])
elif line.find(', delete mark ') > 0 and prev_line.find('merged operations:') == 0:
# Output of show engine innodb status has changed in 5.5
# merged operations:
# insert 593983, delete mark 387006, delete 73092
results['Innodb_ibuf_merged_inserts'] = long(row[1])
results['Innodb_ibuf_merged_delete_marks'] = long(row[4])
results['Innodb_ibuf_merged_deletes'] = long(row[6])
results['Innodb_ibuf_merged'] = results['Innodb_ibuf_merged_inserts'] + results[
'Innodb_ibuf_merged_delete_marks'] + results['Innodb_ibuf_merged_deletes']
elif line.find(' merged recs, ') > 0:
# 19817685 inserts, 19817684 merged recs, 3552620 merges
results['Innodb_ibuf_merged_inserts'] = long(row[0])
results['Innodb_ibuf_merged'] = long(row[2])
results['Innodb_ibuf_merges'] = long(row[5])
elif line.find('Hash table size ') == 0:
# In some versions of InnoDB, the used cells is omitted.
# Hash table size 4425293, used cells 4229064, ....
# Hash table size 57374437, node heap has 72964 buffer(s) <--
# no used cells
results['Innodb_hash_index_cells_total'] = long(row[3])
results['Innodb_hash_index_cells_used'] = long(
row[6]) if line.find('used cells') > 0 else 0
# LOG
elif line.find(" log i/o's done, ") > 0:
# 3430041 log i/o's done, 17.44 log i/o's/second
# 520835887 log i/o's done, 17.28 log i/o's/second, 518724686
# syncs, 2980893 checkpoints
results['Innodb_log_writes'] = long(row[0])
elif line.find(" pending log writes, ") > 0:
# 0 pending log writes, 0 pending chkp writes
results['Innodb_pending_log_writes'] = long(row[0])
results['Innodb_pending_checkpoint_writes'] = long(row[4])
elif line.find("Log sequence number") == 0:
# This number is NOT printed in hex in InnoDB plugin.
# Log sequence number 272588624
results['Innodb_lsn_current'] = long(row[3])
elif line.find("Log flushed up to") == 0:
# This number is NOT printed in hex in InnoDB plugin.
# Log flushed up to 272588624
results['Innodb_lsn_flushed'] = long(row[4])
elif line.find("Last checkpoint at") == 0:
# Last checkpoint at 272588624
results['Innodb_lsn_last_checkpoint'] = long(row[3])
# BUFFER POOL AND MEMORY
elif line.find("Total memory allocated") == 0 and line.find("in additional pool allocated") > 0:
# Total memory allocated 29642194944; in additional pool allocated 0
# Total memory allocated by read views 96
results['Innodb_mem_total'] = long(row[3])
results['Innodb_mem_additional_pool'] = long(row[8])
elif line.find('Adaptive hash index ') == 0:
# Adaptive hash index 1538240664 (186998824 + 1351241840)
results['Innodb_mem_adaptive_hash'] = long(row[3])
elif line.find('Page hash ') == 0:
# Page hash 11688584
results['Innodb_mem_page_hash'] = long(row[2])
elif line.find('Dictionary cache ') == 0:
# Dictionary cache 145525560 (140250984 + 5274576)
results['Innodb_mem_dictionary'] = long(row[2])
elif line.find('File system ') == 0:
# File system 313848 (82672 + 231176)
results['Innodb_mem_file_system'] = long(row[2])
elif line.find('Lock system ') == 0:
# Lock system 29232616 (29219368 + 13248)
results['Innodb_mem_lock_system'] = long(row[2])
elif line.find('Recovery system ') == 0:
# Recovery system 0 (0 + 0)
results['Innodb_mem_recovery_system'] = long(row[2])
elif line.find('Threads ') == 0:
# Threads 409336 (406936 + 2400)
results['Innodb_mem_thread_hash'] = long(row[1])
elif line.find("Buffer pool size ") == 0:
# The " " after size is necessary to avoid matching the wrong line:
# Buffer pool size 1769471
# Buffer pool size, bytes 28991012864
results['Innodb_buffer_pool_pages_total'] = long(row[3])
elif line.find("Free buffers") == 0:
# Free buffers 0
results['Innodb_buffer_pool_pages_free'] = long(row[2])
elif line.find("Database pages") == 0:
# Database pages 1696503
results['Innodb_buffer_pool_pages_data'] = long(row[2])
elif line.find("Modified db pages") == 0:
# Modified db pages 160602
results['Innodb_buffer_pool_pages_dirty'] = long(row[3])
elif line.find("Pages read ahead") == 0:
# Must do this BEFORE the next test, otherwise it'll get fooled by this
# line from the new plugin:
# Pages read ahead 0.00/s, evicted without access 0.06/s
pass
elif line.find("Pages read") == 0:
# Pages read 15240822, created 1770238, written 21705836
results['Innodb_pages_read'] = long(row[2])
results['Innodb_pages_created'] = long(row[4])
results['Innodb_pages_written'] = long(row[6])
# ROW OPERATIONS
elif line.find('Number of rows inserted') == 0:
# Number of rows inserted 50678311, updated 66425915, deleted
# 20605903, read 454561562
results['Innodb_rows_inserted'] = long(row[4])
results['Innodb_rows_updated'] = long(row[6])
results['Innodb_rows_deleted'] = long(row[8])
results['Innodb_rows_read'] = long(row[10])
elif line.find(" queries inside InnoDB, ") > 0:
# 0 queries inside InnoDB, 0 queries in queue
results['Innodb_queries_inside'] = long(row[0])
results['Innodb_queries_queued'] = long(row[4])
prev_line = line
# We need to calculate this metric separately
try:
results['Innodb_checkpoint_age'] = results[
'Innodb_lsn_current'] - results['Innodb_lsn_last_checkpoint']
except KeyError as e:
self.log.error("Not all InnoDB LSN metrics available, unable to compute: {0}".format(e))
# Finally we change back the metrics values to string to make the values
# consistent with how they are reported by SHOW GLOBAL STATUS
for metric, value in results.iteritems():
results[metric] = str(value)
return results
def _get_variable_enabled(self, results, var):
enabled = self._collect_string(var, results)
return (enabled and enabled.lower().strip() == 'on')
def _get_query_exec_time_95th_us(self, db):
# Fetches the 95th percentile query execution time and returns the value
# in microseconds
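        # The query below buckets statement digests by their average time; the
        # self-join accumulates the digest counts for every bucket at least as
        # fast as a given avg_us, so `percentile` is the cumulative share of
        # digests at or below that bucket. The first bucket whose share exceeds
        # 0.95 is returned as the (approximate) 95th percentile.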
sql_95th_percentile = """SELECT s2.avg_us avg_us,
IFNULL(SUM(s1.cnt)/NULLIF((SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest), 0), 0) percentile
FROM (SELECT COUNT(*) cnt, ROUND(avg_timer_wait/1000000) AS avg_us
FROM performance_schema.events_statements_summary_by_digest
GROUP BY avg_us) AS s1
JOIN (SELECT COUNT(*) cnt, ROUND(avg_timer_wait/1000000) AS avg_us
FROM performance_schema.events_statements_summary_by_digest
GROUP BY avg_us) AS s2
ON s1.avg_us <= s2.avg_us
GROUP BY s2.avg_us
HAVING percentile > 0.95
ORDER BY percentile
LIMIT 1"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(sql_95th_percentile)
if cursor.rowcount < 1:
self.warning("Failed to fetch records from the perf schema 'events_statements_summary_by_digest' table.")
return None
row = cursor.fetchone()
query_exec_time_95th_per = row[0]
return query_exec_time_95th_per
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("95th percentile performance metrics unavailable at this time: %s" % str(e))
return None
def _query_exec_time_per_schema(self, db):
# Fetches the avg query execution time per schema and returns the
# value in microseconds
sql_avg_query_run_time = """SELECT schema_name, SUM(count_star) cnt, ROUND(AVG(avg_timer_wait)/1000000) AS avg_us
FROM performance_schema.events_statements_summary_by_digest
WHERE schema_name IS NOT NULL
GROUP BY schema_name"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(sql_avg_query_run_time)
if cursor.rowcount < 1:
self.warning("Failed to fetch records from the perf schema 'events_statements_summary_by_digest' table.")
return None
schema_query_avg_run_time = {}
for row in cursor.fetchall():
schema_name = str(row[0])
avg_us = long(row[2])
# set the tag as the dictionary key
schema_query_avg_run_time["schema:{0}".format(schema_name)] = avg_us
return schema_query_avg_run_time
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Avg exec time performance metrics unavailable at this time: %s" % str(e))
return None
def _query_size_per_schema(self, db):
        # Fetches the total size (data + index) of each schema and returns the
        # value in MiB
sql_query_schema_size = """
SELECT table_schema,
SUM(data_length+index_length)/1024/1024 AS total_mb
FROM information_schema.tables
GROUP BY table_schema;
"""
try:
with closing(db.cursor()) as cursor:
cursor.execute(sql_query_schema_size)
if cursor.rowcount < 1:
self.warning("Failed to fetch records from the information schema 'tables' table.")
return None
schema_size = {}
for row in cursor.fetchall():
schema_name = str(row[0])
size = long(row[1])
# set the tag as the dictionary key
schema_size["schema:{0}".format(schema_name)] = size
return schema_size
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning("Avg exec time performance metrics unavailable at this time: %s" % str(e))
return {}
def _compute_synthetic_results(self, results):
if ('Qcache_hits' in results) and ('Qcache_inserts' in results) and ('Qcache_not_cached' in results):
if not int(results['Qcache_hits']):
results['Qcache_utilization'] = 0
else:
results['Qcache_utilization'] = (float(results['Qcache_hits']) /
(int(results['Qcache_inserts']) +
int(results['Qcache_not_cached']) +
int(results['Qcache_hits'])) * 100)
if all(v is not None for v in (self._qcache_hits, self._qcache_inserts, self._qcache_not_cached)):
if not (int(results['Qcache_hits']) - self._qcache_hits):
results['Qcache_instant_utilization'] = 0
else:
results['Qcache_instant_utilization'] = ((float(results['Qcache_hits']) - self._qcache_hits) /
((int(results['Qcache_inserts']) - self._qcache_inserts) +
(int(results['Qcache_not_cached']) - self._qcache_not_cached) +
(int(results['Qcache_hits']) - self._qcache_hits)) * 100)
# update all three, or none - for consistent samples.
self._qcache_hits = int(results['Qcache_hits'])
self._qcache_inserts = int(results['Qcache_inserts'])
self._qcache_not_cached = int(results['Qcache_not_cached'])
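            # Illustrative arithmetic (hypothetical counter values, not from a
            # real server): with Qcache_hits=800, Qcache_inserts=150 and
            # Qcache_not_cached=50, Qcache_utilization = 800/(150+50+800)*100
            # = 80.0 (percent). Qcache_instant_utilization applies the same
            # formula to the deltas since the previous run, which is why the
            # three counters above are always saved together.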
| lookout/dd-agent | checks.d/mysql.py | Python | bsd-3-clause | 62,670 | 0.002266 |
############################################
# [config.py]
# CONFIGURATION SETTINGS FOR A PARTICULAR METER
#
#
# Set the long-form name of this meter
name = "*PEAK only"
#
# [Do not remove or uncomment the following line]
Cs={}
############################################
############################################
# STRUCTURE PARAMETERS
#
# Parameters subject to conscious control by the poet. Kiparsky & Hanson (1996)
# call these "formally independent of phonological structure." By contrast,
# "realization parameters"--e.g., the size of a metrical position, which positions
# are regulated, and other constraints--"determine the way the structure is
# linguistically manifested, and are dependent on the prosodic givens of language."
#
#
####
# [Number of feet in a line]
#
#Cs['number_feet!=2'] = 1 # require dimeter
#Cs['number_feet!=3'] = 1 # require trimeter
#Cs['number_feet!=4'] = 1 # require tetrameter
#Cs['number_feet!=5'] = 1 # require pentameter
#Cs['number_feet!=6'] = 1 # require hexameter
#Cs['number_feet!=7'] = 1 # require heptameter
#
#
####
# [Headedness of the line]
#
#Cs['headedness!=falling'] = 1 # require a falling rhythm (e.g. trochaic, dactylic)
#Cs['headedness!=rising'] = 1 # require a rising rhythm (e.g., iambic, anapestic)
#
############################################
############################################
# REALIZATION PARAMETERS
#
# All subsequent constraints can be seen as "realization parameters."
# See note to "structure parameters" above for more information.
#
#############################################
# METRICAL PARSING: POSITION SIZE
#
# Select how many syllables are at least *possible* in strong or weak positions
# cf. Kiparsky & Hanson's "position size" parameter ("Parametric Theory" 1996)
#
#
######
# [Maximum position size]
#
# The maximum number of syllables allowed in strong metrical positions (i.e. "s")
maxS=2
#
# The maximum number of syllables allowed in weak metrical positions (i.e. "w")
maxW=2
#
#
######
# [Minimum position size]
#
# (Recommended) Positions are at minimum one syllable in size
splitheavies=0
#
# (Unrecommended) Allow positions to be as small as a single mora
# i.e. (a split heavy syllable can straddle two metrical positions)
#splitheavies=1
############################################
############################################
# METRICAL PARSING: METRICAL CONSTRAINTS
#
# Here you can configure the constraints used by the metrical parser.
# Each constraint is expressed in the form:
# Cs['(constraint name)']=(constraint weight)
# Constraint weights do not affect harmonic bounding (i.e. which parses
# survive as possibilities), but they do affect how those possibilities
# are sorted to select the "best" parse.
#
#
######
# [Constraints regulating the 'STRENGTH' of a syllable]
#
# A syllable is strong if it is a peak in a polysyllabic word:
# the syllables in 'liberty', stressed-unstressed-unstressed,
# are, in terms of *strength*, strong-weak-neutral, because
# the first syllable is more stressed than its neighbor;
# the second syllable less stressed; and the third equally stressed.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any weak syllables ("troughs"):
#Cs['strength.s=>-u']=1
#
# A weak metrical position may not contain any strong syllables ("peaks"):
# [Kiparsky and Hanson believe this is Shakespeare's meter]
Cs['strength.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one strong syllable:
#Cs['strength.s=>p']=3
#
# A weak metrical position should contain at least one weak syllable:
#Cs['strength.w=>u']=3
#
#
#
######
# [Constraints regulating the STRESS of a syllable]
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any unstressed syllables:
# [Kiparsky and Hanson believe this is Hopkins' meter]
#Cs['stress.s=>-u']=1
#
# A weak metrical position should not contain any stressed syllables:
#Cs['stress.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one stressed syllable:
#Cs['stress.s=>p']=2
#
# A weak metrical position must contain at least one unstressed syllable;
#Cs['stress.w=>u']=2
#
#
#
######
# [Constraints regulating the WEIGHT of a syllable]
#
# The weight of a syllable is its "quantity": short or long.
# These constraints are designed for "quantitative verse",
# as for example in classical Latin and Greek poetry.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any light syllables:
#Cs['weight.s=>-u']=2
#
# A weak metrical position should not contain any heavy syllables:
#Cs['weight.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one heavy syllable:
#Cs['weight.s=>p']=2
#
# A weak metrical position must contain at least one light syllable;
#Cs['weight.w=>u']=2
#
#
#
######
# [Constraints regulating what's permissible as a DISYLLABIC metrical position]
# [(with thanks to Sam Bowman, who programmed many of these constraints)]
#
###
# [Based on weight:]
#
# A disyllabic metrical position should not contain more than a minimal foot:
# i.e. W-resolution requires first syllable to be light and unstressed.
Cs['footmin-w-resolution']=1
#
#
# A disyllabic metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-noHX']=1000
#
#
# A disyllabic STRONG metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-s-noHX']=1
#
# A disyllabic metrical position should be syllables weighted light-light:
#Cs['footmin-noLH-noHX']=1
#
###
# [Categorical:]
#
# A metrical position should not contain more than one syllable:
# [use to discourage disyllabic positions]
#Cs['footmin-none']=1
#
# A strong metrical position should not contain more than one syllable:
#Cs['footmin-no-s']=1
#
# A weak metrical position should not contain more than one syllable:
#Cs['footmin-no-w']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *first* or *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions,
# or an initial "extrametrical" syllable]
#Cs['footmin-none-unless-in-first-two-positions']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions]
#Cs['footmin-none-unless-in-second-position']=1
#
# A strong metrical position should not contain more than one syllable,
# *unless* it is preceded by a disyllabic *weak* metrical position:
# [use to implement the metrical pattern described by Derek Attridge,
# in The Rhythms of English Poetry (1982), and commented on by Bruce Hayes
# in his review of the book in Language 60.1 (1984).
# e.g. Shakespeare's "when.your|SWEET.IS|ue.your|SWEET.FORM|should|BEAR"
# [this implementation is different in that it only takes into account
# double-weak beats *preceding* -- due to the way in which the parser
# throws away bounded parses as it goes, it might not be possible for now
# to write a constraint referencing future positions]
#Cs['footmin-no-s-unless-preceded-by-ww']=10
# [The version that does reference future positions; but appears to be unstable]:
#Cs['attridge-ss-not-by-ww']=10
#
###
# [For disyllabic positions crossing a word boundary...
# (i.e. having two syllables, each from a different word)...
#
# ...allow only F-resolutions:
# (both words must be function words and be in a weak metrical position)
Cs['footmin-f-resolution']=1
#
# ...it should never cross a word boundary to begin with:
#Cs['footmin-wordbound']=1000
#
# ...both words should be function words:
#Cs['footmin-wordbound-bothnotfw']=1
#
# ...at least one word should be a function word:
#Cs['footmin-wordbound-neitherfw']=1
#
# ...the left-hand syllable should be a function-word:
#Cs['footmin-wordbound-leftfw']=1
#
# ...the right-hand syllable should be a function word:
#Cs['footmin-wordbound-rightfw']=1
#
# ...neither word should be a monosyllable:
#Cs['footmin-wordbound-nomono']=1
#
# ...neither word should be a LEXICAL monosyllable
# (i.e. function words and polysyllabic words ok)
#Cs['footmin-wordbound-lexmono']=1
###
# [Miscellaneous constraints relating to disyllabic positions]
#
# A disyllabic metrical position may contain a strong syllable
# of a lexical word only if the syllable is (i) light and
# (ii) followed within the same position by an unstressed
# syllable normally belonging to the same word.
# [written by Sam Bowman]
#Cs['footmin-strongconstraint']=1
#
# The final metrical position of the line should not be 'ww'
# [use to encourage "...LI|ber|TY" rather than "...LI|ber.ty"]
#Cs['posthoc-no-final-ww']=2
#
# The final metrical position of the line should not be 'w' or 'ww'
#Cs['posthoc-no-final-w']=2
#
# A line should have all 'ww' or all 'w':
# It works by:
# Nw = Number of weak positions in the line
# Mw = Maximum number of occurrences of 'w' metrical position
# Mww = Maximum number of occurrences of 'ww' metrical position
# M = Whichever is bigger, Mw or Mww
# V = Nw - M
# Violation Score = V * [Weight]
# [use to encourage consistency of meter across line]
# [feel free to make this a decimal number, like 0.25]
#Cs['posthoc-standardize-weakpos']=1
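#
# [Worked example (hypothetical parse): if a line's weak positions are realized
#  as w, ww, w, w, ww, then Nw = 5, Mw = 3, Mww = 2, so M = 3 and V = 5 - 3 = 2;
#  with a weight of 1 this parse incurs a violation score of 2.]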
#
#
#
######
# [MISCELLANEOUS constraints]
#
# A function word can fall only in a weak position:
#Cs['functiontow']=2
#
# An initial syllable must be in a weak position:
#Cs['initialstrong']=2
#
# The first metrical position will not be evaluated
# for any of the strength/stress/weight correspondence constraints:
# [set to 1 to be true]
#Cs['extrametrical-first-pos']=1
#
# The first two metrical positions will not be evaluated
# for any of the strength/stress/weight correspondence constraints:
# [set to 1 to be true]
Cs['skip_initial_foot']=1
#
# A word should not be an elision [use to discourage elisions]:
#Cs['word-elision']=1
#
# A weak metrical position should not contain any syllables
# that are stressed and heavy: [Meter of Finnish "Kalevala"]
#Cs['kalevala.w=>-p']=1
#
# A strong metrical position should not contain any syllables
# that are stressed and light: [Meter of Finnish "Kalevala"]
#Cs['kalevala.s=>-u']=1
############################################
| quadrismegistus/prosodic | meters/strength_and_resolution.py | Python | gpl-3.0 | 10,457 | 0.005929 |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
fjlib.py
"""
from django.conf import settings
from django.db import connection
from django.core.paginator import Paginator, InvalidPage, PageNotAnInteger
from django.http import Http404
from django.utils.encoding import smart_unicode
from oi.feedjack import models
from oi.feedjack import fjcache
# this is taken from django, it was removed in r8191
class ObjectPaginator(Paginator):
"""
Legacy ObjectPaginator class, for backwards compatibility.
Note that each method on this class that takes page_number expects a
zero-based page number, whereas the new API (Paginator/Page) uses one-based
page numbers.
"""
def __init__(self, query_set, num_per_page, orphans=0):
Paginator.__init__(self, query_set, num_per_page, orphans)
#import warnings
#warnings.warn("The ObjectPaginator is deprecated. Use django.core.paginator.Paginator instead.", DeprecationWarning)
# Keep these attributes around for backwards compatibility.
self.query_set = query_set
self.num_per_page = num_per_page
self._hits = self._pages = None
def validate_page_number(self, page_number):
try:
page_number = int(page_number) + 1
except ValueError:
raise PageNotAnInteger
return self.validate_number(page_number)
def get_page(self, page_number):
try:
page_number = int(page_number) + 1
except ValueError:
raise PageNotAnInteger
return self.page(page_number).object_list
def has_next_page(self, page_number):
return page_number < self.pages - 1
def has_previous_page(self, page_number):
return page_number > 0
def first_on_page(self, page_number):
"""
Returns the 1-based index of the first object on the given page,
relative to total objects found (hits).
"""
page_number = self.validate_page_number(page_number)
return (self.num_per_page * (page_number - 1)) + 1
def last_on_page(self, page_number):
"""
Returns the 1-based index of the last object on the given page,
relative to total objects found (hits).
"""
page_number = self.validate_page_number(page_number)
if page_number == self.num_pages:
return self.count
return page_number * self.num_per_page
# The old API called it "hits" instead of "count".
hits = Paginator.count
# The old API called it "pages" instead of "num_pages".
pages = Paginator.num_pages
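# Minimal usage sketch of the legacy zero-based API (assuming an arbitrary
# queryset `qs`): ObjectPaginator(qs, 25).get_page(0) returns the first 25
# objects, and has_next_page(0) tells whether a second page exists.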
def sitefeeds(siteobj):
""" Returns the active feeds of a site.
"""
return siteobj.subscriber_set.filter(is_active=True).select_related()
#return [subscriber['feed'] \
# for subscriber \
# in siteobj.subscriber_set.filter(is_active=True).values('feed')]
def getquery(query):
""" Performs a query and get the results.
"""
try:
conn = connection.cursor()
conn.execute(query)
data = conn.fetchall()
conn.close()
except:
data = []
return data
def get_extra_content(site, sfeeds_ids, ctx):
""" Returns extra data useful to the templates.
"""
# get the subscribers' feeds
if sfeeds_ids:
basefeeds = models.Feed.objects.filter(id__in=sfeeds_ids)
try:
ctx['feeds'] = basefeeds.order_by('name').select_related()
except:
ctx['feeds'] = []
# get the last_checked time
try:
ctx['last_modified'] = basefeeds.filter(\
last_checked__isnull=False).order_by(\
'-last_checked').select_related()[0].last_checked.ctime()
except:
ctx['last_modified'] = '??'
else:
ctx['feeds'] = []
ctx['last_modified'] = '??'
ctx['site'] = site
ctx['media_url'] = '%s/feedjack/%s' % (settings.MEDIA_URL, site.template)
def get_posts_tags(object_list, sfeeds_obj, user_id, tag_name):
""" Adds a qtags property in every post object in a page.
Use "qtags" instead of "tags" in templates to avoid innecesary DB hits.
"""
tagd = {}
user_obj = None
tag_obj = None
tags = models.Tag.objects.extra(\
select={'post_id':'%s.%s' % (\
connection.ops.quote_name('feedjack_post_tags'), \
connection.ops.quote_name('post_id'))}, \
tables=['feedjack_post_tags'], \
where=[\
'%s.%s=%s.%s' % (\
connection.ops.quote_name('feedjack_tag'), \
connection.ops.quote_name('id'), \
connection.ops.quote_name('feedjack_post_tags'), \
connection.ops.quote_name('tag_id')), \
'%s.%s IN (%s)' % (\
connection.ops.quote_name('feedjack_post_tags'), \
connection.ops.quote_name('post_id'), \
', '.join([str(post.id) for post in object_list]))])
for tag in tags:
if tag.post_id not in tagd:
tagd[tag.post_id] = []
tagd[tag.post_id].append(tag)
if tag_name and tag.name == tag_name:
tag_obj = tag
subd = {}
for sub in sfeeds_obj:
subd[sub.feed.id] = sub
for post in object_list:
if post.id in tagd:
post.qtags = tagd[post.id]
else:
post.qtags = []
post.subscriber = subd[post.feed.id]
if user_id and int(user_id) == post.feed.id:
user_obj = post.subscriber
return user_obj, tag_obj
def getcurrentsite(http_post, path_info, query_string):
""" Returns the site id and the page cache key based on the request.
"""
url = u'http://%s/%s' % (smart_unicode(http_post.rstrip('/')), \
smart_unicode(path_info.lstrip('/')))
pagecachekey = '%s?%s' % (smart_unicode(path_info), \
smart_unicode(query_string))
hostdict = fjcache.hostcache_get()
if not hostdict:
hostdict = {}
if url not in hostdict:
default, ret = None, None
for site in models.Site.objects.all():
if url.startswith(site.url):
ret = site
break
if not default or site.default_site:
default = site
if not ret:
if default:
ret = default
else:
# Somebody is requesting something, but the user didn't create
# a site yet. Creating a default one...
ret = models.Site(name='Default Feedjack Site/Planet', \
url='www.feedjack.org', \
title='Feedjack Site Title', \
description='Feedjack Site Description. ' \
'Please change this in the admin interface.')
ret.save()
hostdict[url] = ret.id
fjcache.hostcache_set(hostdict)
return hostdict[url], pagecachekey
def get_paginator(site, sfeeds_ids, page=0, tag=None, user=None):
""" Returns a paginator object and a requested page from it.
"""
if tag:
try:
localposts = models.Tag.objects.get(name=tag).post_set.filter(\
feed__in=sfeeds_ids)
except:
raise Http404
else:
localposts = models.Post.objects.filter(feed__in=sfeeds_ids)
if user:
try:
localposts = localposts.filter(feed=user)
except:
raise Http404
if site.order_posts_by == 2:
localposts = localposts.order_by('-date_created', '-date_modified')
else:
localposts = localposts.order_by('-date_modified')
paginator = ObjectPaginator(localposts.select_related(), \
site.posts_per_page)
try:
object_list = paginator.get_page(page)
except InvalidPage:
if page == 0:
object_list = []
else:
raise Http404
return (paginator, object_list)
def page_context(request, site, tag=None, user_id=None, sfeeds=None):
""" Returns the context dictionary for a page view.
"""
sfeeds_obj, sfeeds_ids = sfeeds
try:
page = int(request.GET.get('page', 0))
except ValueError:
page = 0
paginator, object_list = get_paginator(site, sfeeds_ids, \
page=page, tag=tag, user=user_id)
if object_list:
# This will hit the DB once per page instead of once for every post in
# a page. To take advantage of this the template designer must call
# the qtags property in every item, instead of the default tags
# property.
user_obj, tag_obj = get_posts_tags(object_list, sfeeds_obj, \
user_id, tag)
else:
user_obj, tag_obj = None, None
ctx = {
'object_list': object_list,
'is_paginated': paginator.pages > 1,
'results_per_page': site.posts_per_page,
'has_next': paginator.has_next_page(page),
'has_previous': paginator.has_previous_page(page),
'page': page + 1,
'next': page + 1,
'previous': page - 1,
'pages': paginator.pages,
'hits' : paginator.hits,
}
get_extra_content(site, sfeeds_ids, ctx)
from oi.feedjack import fjcloud
ctx['tagcloud'] = fjcloud.getcloud(site, user_id)
ctx['user_id'] = user_id
#Because we need Django's user, not feedjack's
#ctx['user'] = user_obj
ctx['tag'] = tag_obj
ctx['subscribers'] = sfeeds_obj
return ctx
#~
| MehmetNuri/ozgurlukicin | feedjack/fjlib.py | Python | gpl-3.0 | 9,326 | 0.006005 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "street_agitation_bot.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| Kurpilyansky/street-agitation-telegram-bot | manage.py | Python | gpl-3.0 | 818 | 0.001222 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.nets import alexnet
slim = tf.contrib.slim
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.initialize_all_variables())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
| ml6973/Course | tf-hands-on/slim/python/slim/nets/alexnet_test.py | Python | apache-2.0 | 5,839 | 0.008392 |
import numpy as np
import nengo
import ctn_benchmark
# define the inputs when doing number comparison task
class NumberExperiment:
def __init__(self, p):
self.p = p
self.pairs = []
self.order = []
rng = np.random.RandomState(seed=p.seed)
for i in range(1, 10):
for j in range(i + 1, 10):
order = rng.choice([-1, 1])
self.order.append(order)
if order < 0:
self.pairs.append((i, j))
else:
self.pairs.append((j, i))
rng.shuffle(self.pairs)
#self.pairs = self.pairs[:3]
self.trial_time = 1.0
self.T = len(self.pairs) * self.trial_time
def input0(self, t):
return [0]*self.p.pointer_count
def display(self, t):
index = int(t / self.trial_time)
t = t % self.trial_time
a, b = self.pairs[index % len(self.pairs)]
if 0.1<t<0.2:
self.display.im_func._nengo_html_ = '<h1>%d</h1>' % a
return
if 0.3<t<0.4:
self.display.im_func._nengo_html_ = '<h1>%d</h1>' % b
return
self.display.im_func._nengo_html_ = ''
def input1(self, t):
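        # presents the two digits of the current pair in two brief windows;
        # digits 1-9 are scaled to the range [-0.9, -0.1] before being fed to
        # the magnitude input (area1).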
index = int(t / self.trial_time)
t = t % self.trial_time
a, b = self.pairs[index % len(self.pairs)]
if 0.1<t<0.2:
return [a * 0.1 - 1]
if 0.3<t<0.4:
return [b * 0.1 - 1]
return [0]
def pointer_source(self, t):
return [0, 1]
def pointer_target(self, t):
t = t % self.trial_time
v = [0]*self.p.pointer_count
if 0.1<t<0.2: v[0]=1
if 0.3<t<0.4: v[1]=1
return v
def report_finger(self, t):
return [0]
def report_compare(self, t):
t = t % self.trial_time
if 0.5<t<self.trial_time:
return [1]
else:
return [0]
def memory_clear(self, t):
t = t % self.trial_time
if 1.0 - self.p.time_clear_mem < t < 1.0:
return [1]
else:
return [0]
# define the inputs when doing the finger touching task
class FingerTouchExperiment:
def __init__(self, p):
self.p = p
self.pairs = []
rng = np.random.RandomState(seed=p.seed)
for i in range(self.p.pointer_count):
for j in range(i + 1, self.p.pointer_count):
self.pairs.append((i, j))
rng.shuffle(self.pairs)
self.trial_time = 1.0
self.T = len(self.pairs) * self.trial_time
def input0(self, t):
r=[0]*self.p.pointer_count
index = int(t / self.trial_time)
t = t % self.trial_time
if 0.1<t<0.2:
for i in self.pairs[index]:
r[i]=1
return r
def display(self, t):
self.display.im_func._nengo_html_ = ''
def input1(self, t):
return [0]
def pointer_source(self, t):
return [1,0]
def pointer_target(self, t):
return [1]*self.p.pointer_count
def report_finger(self, t):
t = t % self.trial_time
if 0.3<t<1.0:
return [1]
else:
return [0]
def report_compare(self, t):
return [0]
def memory_clear(self, t):
t = t % self.trial_time
if 1.0 - self.p.time_clear_mem < t < 1.0:
return [1]
else:
return [0]
class FingerGnosis(ctn_benchmark.Benchmark):
def params(self):
self.default('number of input areas', input_count=2)
self.default('neurons for input', N_input=200)
self.default('neurons per pointer', N_pointer=400)
self.default('neurons per decoded reference', N_reference=1000)
self.default('neurons for memory', N_memory=2000)
self.default('neurons for comparison', N_compare=400)
self.default('neurons for reporting', N_report=100)
self.default('number of pointers', pointer_count=3)
self.default('memory synapse time', memory_synapse=0.1)
self.default('clear memory time', time_clear_mem=0.1)
self.default('crosstalk', crosstalk=0.2)
self.default('task', task='compare')
self.default('evidence scale', evidence_scale=1.0)
def model(self, p):
model = nengo.Network()
if p.task == 'compare':
self.exp = NumberExperiment(p=p)
elif p.task == 'fingers':
self.exp = FingerTouchExperiment(p=p)
with model:
input0 = nengo.Node(self.exp.input0)
input1 = nengo.Node(self.exp.input1)
if hasattr(self.exp, 'display'):
display = nengo.Node(self.exp.display)
pointer_source = nengo.Node(self.exp.pointer_source)
pointer_target = nengo.Node(self.exp.pointer_target)
report_finger = nengo.Node(self.exp.report_finger)
report_compare = nengo.Node(self.exp.report_compare)
memory_clear = nengo.Node(self.exp.memory_clear)
# create neural models for the two input areas
# (fingers and magnitude)
area0 = nengo.Ensemble(p.N_input*p.pointer_count, p.pointer_count,
radius=np.sqrt(p.pointer_count),
label='area0')
area1 = nengo.Ensemble(p.N_input, 1, label='area1')
nengo.Connection(input0, area0)
nengo.Connection(input1, area1)
# define the connections to create the pointers
def matrix(n,m,pre=None,post=None,value=1):
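                # (helper) builds an m-by-n transform matrix that is zero
                # everywhere except for `value` at the (post[i], pre[i]) pairs,
                # cycling over the shorter index list; used below to route
                # selected source dimensions into selected target dimensions.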
m=[[0]*n for i in range(m)]
if pre is None: pre=range(n)
if post is None: post=range(m)
for i in range(max(len(pre),len(post))):
m[post[i%len(post)]][pre[i%len(pre)]]=value
return m
pointers = nengo.Network(label='pointers')
with pointers:
for i in range(p.pointer_count):
nengo.Ensemble(p.N_pointer,
dimensions = p.input_count*2+1,
radius = np.sqrt(p.input_count*2+1),
label='%d' % i)
for i in range(p.pointer_count):
pointer = pointers.ensembles[i]
nengo.Connection(pointer_source, pointer,
transform=matrix(
p.input_count, p.input_count*2+1,
post=[k*2 for k in range(p.input_count)]))
nengo.Connection(pointer_target,pointer,
transform=matrix(p.pointer_count,
p.input_count*2+1,
pre=[i],
post=[p.input_count*2]))
nengo.Connection(area0, pointer,
transform=matrix(p.pointer_count,
p.input_count*2+1,
pre=[i],post=[1]))
nengo.Connection(area1, pointer,
transform=matrix(1,p.input_count*2+1,
pre=[0],post=[3]))
# define the connections to extract the current value
# from the pointers
def ref_func(x):
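                # x is laid out as [gate_0, value_0, gate_1, value_1, ..., target];
                # when this pointer is the current target, return the sum of the
                # input values whose source gate is open, otherwise return 0.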
if x[-1]<0.5: return 0
sum=0
for i in range(p.input_count):
if x[2*i]>0.5: sum+=x[2*i+1]
return sum
basis=[]
for i in range(p.pointer_count):
b=[0]*p.pointer_count
b[i]=1
basis.append(b)
b=[0]*p.pointer_count
b[i]=-1
basis.append(b)
reference=nengo.Ensemble(p.N_reference,p.pointer_count,
radius=np.sqrt(p.pointer_count),
encoders=nengo.dists.Choice(basis),
intercepts=nengo.dists.Uniform(0.1,0.9),
label='reference')
for i in range(p.pointer_count):
matrix=[p.crosstalk]*p.pointer_count
matrix[i]=1.0-p.crosstalk
pointer = pointers.ensembles[i]
nengo.Connection(pointer, reference,
function=ref_func,
transform=[[x] for x in matrix])
# add a memory to integrate the value referenced by the pointers
memory = nengo.networks.EnsembleArray(p.N_memory, p.pointer_count,
radius=1, label='memory')
nengo.Connection(reference,memory.input,transform=1)
nengo.Connection(memory.output, memory.input,
transform=1, synapse=p.memory_synapse)
# create a system to report which fingers were pressed
report = nengo.networks.EnsembleArray(p.N_report,p.pointer_count,
encoders=nengo.dists.Choice([[1]]),
intercepts=nengo.dists.Uniform(0.3,0.9),
radius=0.3, label='report')
nengo.Connection(memory.output,report.input,transform=1)
m=[[-10]*p.pointer_count for i in range(p.pointer_count)]
for i in range(p.pointer_count):
m[i][i]=0
nengo.Connection(report.output, report.input,
transform=m, synapse=0.01)
reported = nengo.networks.EnsembleArray(p.N_report, p.pointer_count,
radius=1, encoders=nengo.dists.Choice([[1]]),
intercepts=nengo.dists.Uniform(0.05,0.9),
label='reported')
nengo.Connection(report.output, reported.input,
transform=1, synapse=0.2)
nengo.Connection(reported.output, report.input, transform=-1)
nengo.Connection(reported.output, reported.input, transform=1.2)
# create a system to report whether the first
# or second number is bigger
compare = nengo.Ensemble(p.N_compare,1, label='compare', radius=1)
nengo.Connection(memory.ensembles[0],compare[0],transform=p.evidence_scale)
nengo.Connection(memory.ensembles[1],compare[0],transform=-p.evidence_scale)
# create inhibitory gates to control the two reporting systems
report_gate_f = nengo.Ensemble(50,1,
encoders=nengo.dists.Choice([[1]]),
intercepts=nengo.dists.Uniform(0.1,0.9),
label='report gate f')
report_gate_c=nengo.Ensemble(50,1,
encoders=nengo.dists.Choice([[1]]),
intercepts=nengo.dists.Uniform(0.1,0.9),
label='report gate c')
nengo.Connection(report_finger,report_gate_f,transform=-10)
nengo.Connection(report_compare,report_gate_c,transform=-10)
report_bias=nengo.Node([1], label='bias')
nengo.Connection(report_bias,report_gate_f)
nengo.Connection(report_bias,report_gate_c)
nengo.Connection(report_gate_c, compare.neurons,
transform=[[-100.0]]*p.N_compare, synapse=0.01)
for i in range(p.pointer_count):
nengo.Connection(report_gate_f, report.ensembles[i].neurons,
transform=[[-100.0]]*p.N_report, synapse=0.01)
nengo.Connection(report_gate_f, reported.ensembles[i].neurons,
transform=[[-100.0]]*p.N_report, synapse=0.01)
for ens in memory.all_ensembles + [compare]:
nengo.Connection(memory_clear, ens.neurons,
transform=[[-10]] * ens.n_neurons,
synapse=0.01)
self.p_report = nengo.Probe(report.output, synapse=0.01)
self.p_compare = nengo.Probe(compare, synapse=0.01)
self.p_memory = nengo.Probe(memory.output, synapse=0.01)
if p.backend == 'nengo_spinnaker':
import nengo_spinnaker
nengo_spinnaker.add_spinnaker_params(model.config)
for node in model.all_nodes:
if callable(node.output):
if not hasattr(node.output, '_nengo_html_'):
model.config[node].function_of_time = True
return model
def evaluate(self, p, sim, plt):
sim.run(self.exp.T)
self.record_speed(self.exp.T)
t = sim.trange()
if p.task == 'fingers':
scores = np.zeros(p.pointer_count-1, dtype=float)
count = np.zeros(p.pointer_count-1)
mags = np.zeros(p.pointer_count-1)
magnitudes = []
for i in range(len(self.exp.pairs)):
t_start = i * self.exp.trial_time
t_end = (i+1) * self.exp.trial_time
index_start = np.argmax(t > t_start)
index_end = np.argmax(t > t_end)
if t_end >= t[-1]:
index_end = len(t)
data = sim.data[self.p_report][index_start:index_end]
answers = np.max(data, axis=0)
values = [(v, ii) for ii, v in enumerate(answers)]
values.sort()
r = values[-1][1], values[-2][1]
c = self.exp.pairs[i]
delta = abs(c[0] - c[1])
count[delta - 1] += 1
if (r[0], r[1]) == (c[0], c[1]) or (r[0], r[1]) == (c[1], c[0]):
v = (values[-1][0] + values[-2][0])/2
magnitudes.append((c[0], c[1], v))
mags[delta - 1] += v
scores[delta - 1] += 1
mags = mags / scores
scores = scores / count
else:
scores = np.zeros(8, dtype=float)
count = np.zeros(8)
mags = np.zeros(8, dtype=float)
magnitudes = []
for i in range(len(self.exp.pairs)):
t_start = i * self.exp.trial_time
t_end = (i+1) * self.exp.trial_time
index_start = np.argmax(t > t_start)
index_end = np.argmax(t > t_end)
if t_end >= t[-1]:
index_end = len(t)
data = sim.data[self.p_compare][index_start:index_end]
answer = np.mean(data)
answer_value = np.max(data) if answer > 0 else np.min(data)
c = self.exp.pairs[i]
delta = abs(c[0] - c[1])
count[delta - 1] += 1
if (answer < 0 and c[0] < c[1]) or (answer > 0 and c[1] < c[0]):
scores[delta - 1] += 1
mags[delta - 1] += np.abs(answer_value)
magnitudes.append((c[0], c[1], answer_value))
mags = mags / scores
scores = scores / count
if plt is not None:
plt.subplot(2,1,1)
if p.task == 'fingers':
plt.plot(sim.trange(), sim.data[self.p_report])
elif p.task == 'compare':
plt.plot(sim.trange(), sim.data[self.p_compare])
for i, (a, b) in enumerate(self.exp.pairs):
t = self.exp.trial_time * (i + 0.5)
colors = ['#000000', '#666666']
if a < b:
colors = colors[::-1]
plt.text(t, 1.7, '%d' % a, color=colors[0])
plt.text(t, -1.7, '%d' % b, color=colors[1])
plt.subplot(2,1,2)
plt.plot(sim.trange(), sim.data[self.p_memory])
result = {}
for i, s in enumerate(scores):
result['score%d'%(i+1)] = s
for i, m in enumerate(mags):
result['mag%d'%(i+1)] = m
result['magnitudes'] = magnitudes
return result
if __name__ == '__main__':
FingerGnosis().run()
else:
model = FingerGnosis().make_model(task='compare', crosstalk=0, time_clear_mem=0.4)
| tcstewar/finger_gnosis | pointer.py | Python | gpl-2.0 | 16,229 | 0.00875 |
from PySide2 import QtGui, QtCore, QtWidgets
from design import SidUi, DdUi
from ServersData import ServersDownloadThread, servers
import sys
class SpeedInputDialog(QtWidgets.QDialog, SidUi):
def __init__(self):
QtWidgets.QDialog.__init__(self)
self.setupUi()
def get_data(self):
self.accept()
return self.world_speedBox.value(), self.unit_speedBox.value()
def showEvent(self, event):
geom = self.frameGeometry()
geom.moveCenter(QtGui.QCursor.pos())
self.setGeometry(geom)
class ServersDownloadDialog(QtWidgets.QDialog, DdUi):
def __init__(self, servers_json_path):
QtWidgets.QDialog.__init__(self)
self.downloaded = False
self.servers_amount = len(servers)
self.setupUi()
self.servers_json_path = servers_json_path
self.servers_download_function()
def servers_download_function(self):
self.get_servers_download_thread = ServersDownloadThread(self.servers_json_path)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("update_progress_text(PyObject)"), self.update_progress_text)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("update_progress_bar(PyObject)"), self.update_progress_bar)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("update_button()"), self.update_button)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("download_error(PyObject)"), self.download_error)
self.get_servers_download_thread.start()
def update_progress_text(self, text):
self.progress_text.append(text)
def update_progress_bar(self, value):
self.progress_bar.setValue(value)
def update_button(self):
self.horizontalLayout.removeWidget(self.cancelButton)
self.cancelButton.deleteLater()
self.cancelButton = None
self.downloaded = True
self.okButton = QtWidgets.QPushButton("Ok")
self.horizontalLayout.addWidget(self.okButton)
self.okButton.clicked.connect(self.ok_function)
def cancel_function(self):
reply = QtWidgets.QMessageBox.question(self, 'Message',
"Are you sure that you want to cancel downloading? This will exit the program.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
sys.exit()
def closeEvent(self, event):
if self.downloaded:
return event.accept()
reply = QtWidgets.QMessageBox.question(self, 'Message',
"The server config with the worlds is downloading, would you like to exit the program anyway?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
sys.exit()
else:
event.ignore()
def ok_function(self):
self.close()
def download_error(self, error_text):
QtWidgets.QMessageBox.critical(self, "Download Error", error_text)
sys.exit() | ZeX2/TWTools | CustomDialogs.py | Python | gpl-3.0 | 3,155 | 0.005071 |
from rpg.plugin import Plugin
from rpg.command import Command
from rpg.utils import path_to_str
from re import compile
from subprocess import CalledProcessError
import logging
class CPlugin(Plugin):
EXT_CPP = [r"cc", r"cxx", r"cpp", r"c\+\+", r"ii", r"ixx",
r"ipp", r"i\+\+", r"hh", r"hxx", r"hpp", r"h\+\+",
r"c", r"h"]
def patched(self, project_dir, spec, sack):
""" Finds dependencies via makedepend - This is not garanteed to be
all of them. Makedepend uses macro preprocessor and if it throws
and error makedepend didn't print deps. """
out = Command([
"find " + path_to_str(project_dir) + " -name " +
" -o -name ".join(
["'*." + ex + "'" for ex in self.EXT_CPP]
)
]).execute()
cc_makedep = ""
cc_included_files = []
for _f in out.splitlines():
try:
cc_makedep = Command("makedepend -w 1000 " + str(_f) +
" -f- 2>/dev/null").execute()
except CalledProcessError as e:
logging.warn(str(e.cmd) + "\n" + str(e.output))
continue
cc_included_files += [
s for s in cc_makedep.split()
if (s.startswith("/usr") or s.startswith("/include"))
and str(project_dir) not in s]
spec.required_files.update(cc_included_files)
spec.build_required_files.update(cc_included_files)
MOCK_C_ERR = compile(r"fatal error\: ([^:]*\.[^:]*)\: "
r"No such file or directory")
def mock_recover(self, log, spec):
""" This find dependencies makedepend didn't find. """
for err in log:
_missing = self.MOCK_C_ERR.search(err)
if _missing:
_missing = _missing.group(1)
logging.debug("Adding missing file " + _missing)
spec.required_files.update(["*" + _missing])
spec.build_required_files.update(["*" + _missing])
return True
return False
| jsilhan/rpg | rpg/plugins/lang/c.py | Python | gpl-2.0 | 2,122 | 0 |
# -*- coding: utf-8 -*-
from .env import *
from amoco.cas.expressions import regtype
from amoco.arch.core import Formatter, Token
def mnemo(i):
mn = i.mnemonic.lower()
return [(Token.Mnemonic, "{: <12}".format(mn))]
def deref(opd):
return "[%s+%d]" % (opd.a.base, opd.a.disp)
def opers(i):
s = []
for op in i.operands:
if op._is_mem:
s.append((Token.Memory, deref(op)))
elif op._is_cst:
if i.misc["imm_ref"] is not None:
s.append((Token.Address, "%s" % (i.misc["imm_ref"])))
elif op.sf:
s.append((Token.Constant, "%+d" % op.value))
else:
s.append((Token.Constant, op.__str__()))
elif op._is_reg:
s.append((Token.Register, op.__str__()))
s.append((Token.Literal, ", "))
if len(s) > 0:
s.pop()
return s
def opers_adr(i):
s = opers(i)
if i.address is None:
s[-1] = (Token.Address, ".%+d" % i.operands[-1])
else:
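        # eBPF jump offsets are counted in 8-byte instructions, relative to the
        # instruction that follows the jump (hence address + length below).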
imm_ref = i.address + i.length + (i.operands[-1] * 8)
s[-1] = (Token.Address, "#%s" % (imm_ref))
return s
def opers_adr2(i):
s = opers(i)
if i.address is None:
s[-3] = (Token.Address, ".%+d" % i.operands[-2])
s[-1] = (Token.Address, ".%+d" % i.operands[-1])
else:
imm_ref1 = i.address + i.length * (i.operands[-2] + 1)
imm_ref2 = i.address + i.length * (i.operands[-1] + 1)
s[-3] = (Token.Address, "#%s" % (imm_ref1))
s[-1] = (Token.Address, "#%s" % (imm_ref2))
return s
format_default = (mnemo, opers)
eBPF_full_formats = {
"ebpf_jmp_": (mnemo, opers_adr),
"bpf_jmp_": (mnemo, opers_adr2),
}
eBPF_full = Formatter(eBPF_full_formats)
eBPF_full.default = format_default
| LRGH/amoco | amoco/arch/eBPF/formats.py | Python | gpl-2.0 | 1,781 | 0 |
# from http://diydrones.com/forum/topics/mission-planner-python-script?commentId=705844%3AComment%3A2035437&xg_source=msg_com_forum
import socket
import sys
import math
from math import sqrt
import clr
import time
import re, string
clr.AddReference("MissionPlanner.Utilities")
import MissionPlanner #import *
clr.AddReference("MissionPlanner.Utilities") #includes the Utilities class
from MissionPlanner.Utilities import Locationwp
HOST = 'localhost' # Symbolic name meaning all available interfaces
#SPORT = 5000 # Arbitrary non-privileged port
RPORT = 4000 # Arbitrary non-privileged port
REMOTE = ''
# Datagram (udp) socket
rsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
print 'Sockets created'
# Bind socket to local host and port
try:
rsock.bind((HOST,RPORT))
except socket.error, msg:
#print 'Bind failed. Error Code:'
sys.stderr.write("[ERROR] %s\n" % msg[1])
rsock.close()
sys.exit()
print 'Receive Socket bind complete on ' + str(RPORT)
print 'Starting Follow'
Script.ChangeMode("Guided") # changes mode to "Guided"
print 'Guided Mode'
#keep talking with the Mission Planner server
while 1:
msg = rsock.recv(1024)
pattern = re.compile("[ ]")
parameters = pattern.split(msg)
latData = parameters[0]
lngData = parameters[1]
headingData = parameters[2]
altData = parameters[3]
float_lat = float(latData)
float_lng = float(lngData)
float_heading = float(headingData)
float_alt = float(altData)
"""Safety Manual Mode Switch"""
#while True:
if cs.mode == 'MANUAL':
Script.ChangeMode("Manual")
rsock.close()
else:
#print cs.mode
"""Follower Offset"""
XOffset= float(0) #User Input for x axis offset
YOffset= float(-2) #User Input for y axis offset
brng = math.radians(float_heading)
# brng = float_heading*math.pi/180 #User input heading angle of follower in relation to leader. 0 degrees is forward.
d = math.sqrt((XOffset**2)+(YOffset**2)) #Distance in m
MperLat = 69.172*1609.34 #meters per degree of latitude. Length of degree (miles) at equator * meters in a mile
        MperLong = math.cos(math.radians(float_lat))*69.172*1609.34 #meters per degree of longitude (math.cos expects radians)
        Lat_Offset_meters = YOffset/MperLat #lat offset converted to degrees of latitude
        Long_Offset_meters = XOffset/MperLong #long offset converted to degrees of longitude
Follower_lat = float_lat + (Long_Offset_meters*math.sin(brng)) + (Lat_Offset_meters*math.cos(brng)) #rotates lat follower offset in relation to heading of leader
Follower_long = float_lng - (Long_Offset_meters*math.cos(brng)) + (Lat_Offset_meters*math.sin(brng)) #rotates long follower offset in relation to heading of leader
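        # Offset rotation as written above: with dx = Long_Offset_meters and
        # dy = Lat_Offset_meters (both already converted to degrees),
        #   lat' = lat + dx*sin(brng) + dy*cos(brng)
        #   lon' = lon - dx*cos(brng) + dy*sin(brng)
        # i.e. the user-defined offset is rotated from the leader's heading
        # frame into north/east displacements before being applied.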
Follower_alt = float_alt + 10
#Follower_alt = 10
float_lat = float(Follower_lat)
float_lng = float(Follower_long)
float_alt = float(Follower_alt) #4-5 second lag induced on altitude waypoint line, unless alt is set to 0
print(float_lat)
print(float_lng)
print(float_heading)
print(float_alt)
"""Writing Waypoints"""
item = MissionPlanner.Utilities.Locationwp() # creating waypoint
MissionPlanner.Utilities.Locationwp.lat.SetValue(item,float_lat)
MissionPlanner.Utilities.Locationwp.lng.SetValue(item,float_lng)
#MissionPlanner.Utilities.Locationwp.groundcourse.SetValue(item,float_heading)
MissionPlanner.Utilities.Locationwp.alt.SetValue(item,float_alt) #Can only use lat,lng, or alt
MAV.setGuidedModeWP(item) #set waypoint
print 'Waypoint Sent'
print time.strftime('%X %x %Z')
# exit
rsock.close()
print 'Script End'
| ryokochang/Slab-GCS | bin/Release/Scripts/example6.py | Python | gpl-3.0 | 3,744 | 0.029129 |
from oscar.test.testcases import WebTestCase
from oscar.test.factories import create_product, UserFactory
from oscar.core.compat import get_user_model
from oscar.apps.catalogue.reviews.signals import review_added
from oscar.test.contextmanagers import mock_signal_receiver
class TestACustomer(WebTestCase):
def setUp(self):
self.product = create_product()
def test_can_add_a_review_when_anonymous(self):
detail_page = self.app.get(self.product.get_absolute_url())
add_review_page = detail_page.click(linkid='write_review')
form = add_review_page.forms['add_review_form']
form['title'] = 'This is great!'
form['score'] = 5
form['body'] = 'Loving it, loving it, loving it'
form['name'] = 'John Doe'
form['email'] = 'john@example.com'
form.submit()
self.assertEqual(1, self.product.reviews.all().count())
def test_can_add_a_review_when_signed_in(self):
user = UserFactory()
detail_page = self.app.get(self.product.get_absolute_url(),
user=user)
add_review_page = detail_page.click(linkid="write_review")
form = add_review_page.forms['add_review_form']
form['title'] = 'This is great!'
form['score'] = 5
form['body'] = 'Loving it, loving it, loving it'
form.submit()
self.assertEqual(1, self.product.reviews.all().count())
def test_adding_a_review_sends_a_signal(self):
review_user = UserFactory()
detail_page = self.app.get(self.product.get_absolute_url(),
user=review_user)
with mock_signal_receiver(review_added) as receiver:
add_review_page = detail_page.click(linkid="write_review")
form = add_review_page.forms['add_review_form']
form['title'] = 'This is great!'
form['score'] = 5
form['body'] = 'Loving it, loving it, loving it'
form.submit()
self.assertEqual(receiver.call_count, 1)
self.assertEqual(1, self.product.reviews.all().count())
| itbabu/django-oscar | tests/functional/catalogue/review_tests.py | Python | bsd-3-clause | 2,113 | 0 |
#!/usr/bin/python
import cv2
import numpy as np
import sys, getopt
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
from matplotlib import pyplot as plt
image_path = None
def printHelp():
print 'main.py\n' \
' -i <Image Path. Ex: /home/myImage.jpg > (Mandatory)\n' \
' \n Example: python main.py -i myOriginalImage.jpg \n '
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:")
except getopt.GetoptError:
printHelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printHelp()
sys.exit()
elif opt in ("-i"):
image_path = arg
if image_path == None:
print "Input file missing"
printHelp()
sys.exit()
img = cv2.imread(image_path)
color = ('b','g','r')
for i,col in enumerate(color):
hist = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(hist,color = col)
plt.xlim([0,256])
plt.savefig("hist.png")
| gustavovaliati/ci724-ppginfufpr-2016 | exerc-3a/main.py | Python | gpl-3.0 | 930 | 0.022581 |
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple cache management utility for Glance.
"""
from __future__ import print_function
import functools
import optparse
import os
import sys
import time
from oslo_utils import encodeutils
from oslo_utils import timeutils
from glance.common import utils
from six.moves import input
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from glance.common import exception
import glance.image_cache.client
from glance.version import version_info as version
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
ret = func(*args, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotFound:
options = args[0]
print("Cache management middleware not enabled on host %s" %
options.host)
return FAILURE
except exception.Forbidden:
print("Not authorized to make this request.")
return FAILURE
except Exception as e:
options = args[0]
if options.debug:
raise
print("Failed to %s. Got error:" % action)
pieces = encodeutils.exception_to_unicode(e).split('\n')
for piece in pieces:
print(piece)
return FAILURE
return wrapper
return wrap
@catch_error('show cached images')
def list_cached(options, args):
"""%(prog)s list-cached [options]
List all images currently cached.
"""
client = get_client(options)
images = client.get_cached_images()
if not images:
print("No cached images.")
return SUCCESS
print("Found %d cached images..." % len(images))
pretty_table = utils.PrettyTable()
pretty_table.add_column(36, label="ID")
pretty_table.add_column(19, label="Last Accessed (UTC)")
pretty_table.add_column(19, label="Last Modified (UTC)")
# 1 TB takes 13 characters to display: len(str(2**40)) == 13
pretty_table.add_column(14, label="Size", just="r")
pretty_table.add_column(10, label="Hits", just="r")
print(pretty_table.make_header())
for image in images:
last_modified = image['last_modified']
last_modified = timeutils.iso8601_from_timestamp(last_modified)
last_accessed = image['last_accessed']
if last_accessed == 0:
last_accessed = "N/A"
else:
last_accessed = timeutils.iso8601_from_timestamp(last_accessed)
print(pretty_table.make_row(
image['image_id'],
last_accessed,
last_modified,
image['size'],
image['hits']))
@catch_error('show queued images')
def list_queued(options, args):
"""%(prog)s list-queued [options]
List all images currently queued for caching.
"""
client = get_client(options)
images = client.get_queued_images()
if not images:
print("No queued images.")
return SUCCESS
print("Found %d queued images..." % len(images))
pretty_table = utils.PrettyTable()
pretty_table.add_column(36, label="ID")
print(pretty_table.make_header())
for image in images:
print(pretty_table.make_row(image))
@catch_error('queue the specified image for caching')
def queue_image(options, args):
"""%(prog)s queue-image <IMAGE_ID> [options]
Queues an image for caching
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("queue from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Queue image %(image_id)s for caching?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.queue_image_for_caching(image_id)
if options.verbose:
print("Queued image %(image_id)s for caching" %
{'image_id': image_id})
return SUCCESS
@catch_error('delete the specified cached image')
def delete_cached_image(options, args):
"""
%(prog)s delete-cached-image <IMAGE_ID> [options]
Deletes an image from the cache
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("delete from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Delete cached image %(image_id)s?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.delete_cached_image(image_id)
if options.verbose:
print("Deleted cached image %(image_id)s" % {'image_id': image_id})
return SUCCESS
@catch_error('Delete all cached images')
def delete_all_cached_images(options, args):
"""%(prog)s delete-all-cached-images [options]
Remove all images from the cache.
"""
if (not options.force and
not user_confirm("Delete all cached images?", default=False)):
return SUCCESS
client = get_client(options)
num_deleted = client.delete_all_cached_images()
if options.verbose:
print("Deleted %(num_deleted)s cached images" %
{'num_deleted': num_deleted})
return SUCCESS
@catch_error('delete the specified queued image')
def delete_queued_image(options, args):
"""
%(prog)s delete-queued-image <IMAGE_ID> [options]
    Deletes an image from the cache queue
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("delete from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Delete queued image %(image_id)s?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.delete_queued_image(image_id)
if options.verbose:
print("Deleted queued image %(image_id)s" % {'image_id': image_id})
return SUCCESS
@catch_error('Delete all queued images')
def delete_all_queued_images(options, args):
"""%(prog)s delete-all-queued-images [options]
Remove all images from the cache queue.
"""
if (not options.force and
not user_confirm("Delete all queued images?", default=False)):
return SUCCESS
client = get_client(options)
num_deleted = client.delete_all_queued_images()
if options.verbose:
print("Deleted %(num_deleted)s queued images" %
{'num_deleted': num_deleted})
return SUCCESS
def get_client(options):
"""Return a new client object to a Glance server.
specified by the --host and --port options
supplied to the CLI
"""
return glance.image_cache.client.get_client(
host=options.host,
port=options.port,
username=options.os_username,
password=options.os_password,
tenant=options.os_tenant_name,
auth_url=options.os_auth_url,
auth_strategy=options.os_auth_strategy,
auth_token=options.os_auth_token,
region=options.os_region_name,
insecure=options.insecure)
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
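# Illustrative call (hypothetical variable names):
#   env('OS_USERNAME', 'NOVA_USERNAME', default='demo')
# returns the value of the first variable that is set and non-empty,
# otherwise 'demo'.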
def create_options(parser):
"""Set up the CLI and config-file options that may be
    parsed and program commands run.
:param parser: The option parser
"""
parser.add_option('-v', '--verbose', default=False, action="store_true",
help="Print more verbose output.")
parser.add_option('-d', '--debug', default=False, action="store_true",
help="Print debugging output.")
parser.add_option('-H', '--host', metavar="ADDRESS", default="0.0.0.0",
help="Address of Glance API host. "
"Default: %default.")
parser.add_option('-p', '--port', dest="port", metavar="PORT",
type=int, default=9292,
help="Port the Glance API host listens on. "
"Default: %default.")
parser.add_option('-k', '--insecure', dest="insecure",
default=False, action="store_true",
help="Explicitly allow glance to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution.")
parser.add_option('-f', '--force', dest="force", metavar="FORCE",
default=False, action="store_true",
help="Prevent select actions from requesting "
"user confirmation.")
parser.add_option('--os-auth-token',
dest='os_auth_token',
default=env('OS_AUTH_TOKEN'),
help='Defaults to env[OS_AUTH_TOKEN].')
parser.add_option('-A', '--os_auth_token', '--auth_token',
dest='os_auth_token',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-username',
dest='os_username',
default=env('OS_USERNAME'),
help='Defaults to env[OS_USERNAME].')
parser.add_option('-I', '--os_username',
dest='os_username',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-password',
dest='os_password',
default=env('OS_PASSWORD'),
help='Defaults to env[OS_PASSWORD].')
parser.add_option('-K', '--os_password',
dest='os_password',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-region-name',
dest='os_region_name',
default=env('OS_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME].')
parser.add_option('-R', '--os_region_name',
dest='os_region_name',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-tenant-id',
dest='os_tenant_id',
default=env('OS_TENANT_ID'),
help='Defaults to env[OS_TENANT_ID].')
parser.add_option('--os_tenant_id',
dest='os_tenant_id',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-tenant-name',
dest='os_tenant_name',
default=env('OS_TENANT_NAME'),
help='Defaults to env[OS_TENANT_NAME].')
parser.add_option('-T', '--os_tenant_name',
dest='os_tenant_name',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-auth-url',
default=env('OS_AUTH_URL'),
help='Defaults to env[OS_AUTH_URL].')
parser.add_option('-N', '--os_auth_url',
dest='os_auth_url',
help=optparse.SUPPRESS_HELP)
parser.add_option('-S', '--os_auth_strategy', dest="os_auth_strategy",
metavar="STRATEGY",
help="Authentication strategy (keystone or noauth).")
def parse_options(parser, cli_args):
"""
Returns the parsed CLI options, command to run and its arguments, merged
with any same-named options found in a configuration file
:param parser: The option parser
"""
if not cli_args:
cli_args.append('-h') # Show options in usage output...
(options, args) = parser.parse_args(cli_args)
# HACK(sirp): Make the parser available to the print_help method
# print_help is a command, so it only accepts (options, args); we could
# one-off have it take (parser, options, args), however, for now, I think
# this little hack will suffice
options.__parser = parser
if not args:
parser.print_usage()
sys.exit(0)
command_name = args.pop(0)
command = lookup_command(parser, command_name)
return (options, command, args)
def print_help(options, args):
"""
Print help specific to a command
"""
if len(args) != 1:
sys.exit("Please specify a command")
parser = options.__parser
command_name = args.pop()
command = lookup_command(parser, command_name)
print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
def lookup_command(parser, command_name):
BASE_COMMANDS = {'help': print_help}
CACHE_COMMANDS = {
'list-cached': list_cached,
'list-queued': list_queued,
'queue-image': queue_image,
'delete-cached-image': delete_cached_image,
'delete-all-cached-images': delete_all_cached_images,
'delete-queued-image': delete_queued_image,
'delete-all-queued-images': delete_all_queued_images,
}
commands = {}
for command_set in (BASE_COMMANDS, CACHE_COMMANDS):
commands.update(command_set)
try:
command = commands[command_name]
except KeyError:
parser.print_usage()
sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name})
return command
def user_confirm(prompt, default=False):
"""Yes/No question dialog with user.
:param prompt: question/statement to present to user (string)
:param default: boolean value to return if empty string
is received as response to prompt
"""
if default:
prompt_default = "[Y/n]"
else:
prompt_default = "[y/N]"
answer = input("%s %s " % (prompt, prompt_default))
if answer == "":
return default
else:
return answer.lower() in ("yes", "y")
def main():
usage = """
%prog <command> [options] [args]
Commands:
help <command> Output help for one of the commands below
list-cached List all images currently cached
list-queued List all images currently queued for caching
queue-image Queue an image for caching
delete-cached-image Purges an image from the cache
delete-all-cached-images Removes all images from the cache
delete-queued-image Deletes an image from the cache queue
delete-all-queued-images Deletes all images from the cache queue
"""
version_string = version.cached_version_string()
oparser = optparse.OptionParser(version=version_string,
usage=usage.strip())
create_options(oparser)
(options, command, args) = parse_options(oparser, sys.argv[1:])
try:
start_time = time.time()
result = command(options, args)
end_time = time.time()
if options.verbose:
print("Completed in %-0.4f sec." % (end_time - start_time))
sys.exit(result)
except (RuntimeError, NotImplementedError) as e:
print("ERROR: ", e)
if __name__ == '__main__':
main()
| vuntz/glance | glance/cmd/cache_manage.py | Python | apache-2.0 | 16,590 | 0.000241 |
# -*- coding: utf-8 -*-
#
# mete0r.gpl : Manage GPL'ed source code files
# Copyright (C) 2015 mete0r <mete0r@sarangbang.or.kr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| mete0r/gpl | mete0r_gpl/__init__.py | Python | agpl-3.0 | 810 | 0 |
# encoding: UTF-8
import talib as ta
import numpy as np
from ctaBase import *
from ctaTemplate import CtaTemplate
import time
########################################################################
class TickBreaker(CtaTemplate):
"""跳空追击策略(MC版本转化)"""
className = 'TickBreaker'
author = u'融拓科技'
    # Strategy parameters
    forward = 5                  # number of forward ticks
    backward = 2                 # number of reverse ticks
    reForward = 1                # number of ticks after turning forward again
    maPeriod = 5                 # moving average parameter
    # Strategy variables
    tickHistory = []             # list caching tick prices
    maxHistory = 7               # maximum number of cached ticks
    forwardNo = EMPTY_INT        # number of forward ticks seen
    backwardNo = EMPTY_INT       # number of reverse ticks seen
    reForwardNo = EMPTY_INT      # number of re-forward ticks seen
    # Parameter list, holds the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'forward',
'backward',
'reForward'
]
    # Variable list, holds the names of the variables
varList = ['inited',
'trading',
'pos',
'forwardNo',
'backwardNo',
'reForwardNo'
]
    # condition1 = False  # >= 5 rising ticks
    # condition2 = False  # 2 falling ticks
    # condition3 = False  # 1 rising tick
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(TickBreaker, self).__init__(ctaEngine, setting)
        # Note: mutable object attributes of a strategy class (usually list, dict,
        # etc.) must be re-created when the strategy is initialized, otherwise the
        # data would be shared between strategy instances, which risks subtle
        # strategy-logic errors. Declaring them at class level is optional (they
        # could live only in __init__); it is done mainly for readability.
        # Strategy variables
        self.tickHistory = []             # list caching tick prices
        self.maxHistory = 7               # maximum number of cached ticks
        self.forwardNo = EMPTY_INT        # number of forward ticks seen
        self.backwardNo = EMPTY_INT       # number of reverse ticks seen
        self.reForwardNo = EMPTY_INT      # number of re-forward ticks seen
        self.oldPrice = 0                 # lastPrice of the previous tick
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'tick策略初始化')
self.putEvent()
# ----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'tick策略启动')
self.putEvent()
# ----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'tick策略停止')
self.putEvent()
# ----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 把最新的收盘价缓存到列表中
start = time.time()
if tick.lastPrice != self.oldPrice:
self.tickHistory.append(tick.lastPrice)
self.oldPrice = tick.lastPrice
else:
return
        # Check the list length; if it exceeds the cache limit, drop the oldest entry
        # to keep the amount of data used for calculation small and fast
if len(self.tickHistory) > self.maxHistory:
self.tickHistory.pop(0)
        # If still below the cache limit, there is not enough data yet, so skip the rest
else:
return
        # # Convert the cached close prices to a numpy array and feed it to talib's SMA
# closeArray = np.array(self.closeHistory)
# sma = ta.SMA(closeArray, self.maPeriod)
        # # >= 5 rising ticks
# condition1 = self.tickHistory[0] < self.tickHistory[1] < self.tickHistory[2] < self.tickHistory[3] < self.tickHistory[4]
        # # 2 falling ticks
# condition2 = self.tickHistory[4] > self.tickHistory[5] > self.tickHistory[6]
        # # 1 rising tick
# condition3 = self.tickHistory[6] < self.tickHistory[7]
# print self.tickHistory
# print 'buy: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
# buyCondition = condition1 and condition2 and condition3
#
        # # >= 5 falling ticks
# condition1 = self.tickHistory[0] > self.tickHistory[1] > self.tickHistory[2] > self.tickHistory[3] > self.tickHistory[4]
        # # 2 rising ticks
# condition2 = self.tickHistory[4] < self.tickHistory[5] < self.tickHistory[6]
        # # 1 falling tick
# condition3 = self.tickHistory[6] > self.tickHistory[7]
# print 'sell: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
#
# sellCondition = condition1 and condition2 and condition3
        # >= 5 rising ticks
condition1 = self.tickHistory[0] < self.tickHistory[1] < self.tickHistory[2] < self.tickHistory[3]
        # 2 falling ticks
condition2 = self.tickHistory[3] > self.tickHistory[4] > self.tickHistory[5]
        # 1 rising tick
condition3 = self.tickHistory[5] < self.tickHistory[6]
# print self.tickHistory
# print 'buy: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
buyCondition = condition1 and condition2 and condition3
        # >= 5 falling ticks
condition1 = self.tickHistory[0] > self.tickHistory[1] > self.tickHistory[2] > self.tickHistory[3]
        # 2 rising ticks
condition2 = self.tickHistory[3] < self.tickHistory[4] < self.tickHistory[5]
        # 1 falling tick
condition3 = self.tickHistory[5] > self.tickHistory[6]
# print 'sell: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
sellShortCondition = condition1 and condition2 and condition3
        # The golden-cross and dead-cross conditions are mutually exclusive
if buyCondition:
            # If there is no position at the golden cross, simply go long
if self.pos == 0:
self.buy(tick.lastPrice, 1)
            # If holding a short position, close the short first, then go long
elif self.pos < 0:
self.cover(tick.lastPrice, 1)
self.buy(tick.lastPrice, 1)
        # The dead cross is handled the opposite way
elif sellShortCondition:
if self.pos == 0:
self.short(tick.lastPrice, 1)
elif self.pos > 0:
self.sell(tick.lastPrice, 1)
self.short(tick.lastPrice, 1)
sellCondition = self.tickHistory[4] > self.tickHistory[5] > self.tickHistory[6]
buyCoverCondition = self.tickHistory[4] < self.tickHistory[5] < self.tickHistory[6]
# if self.pos > 0 and sellCondition:
# self.sell(tick.lastPrice, 1)
#
# if self.pos < 0 and buyCoverCondition:
# self.cover(tick.lastPrice, 1)
# print time.time() - start
        # Emit a status update event
self.putEvent()
# ----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
pass
# ----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
# 对于无需做细粒度委托控制的策略,可以忽略onOrder
pass
# ----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交推送(必须由用户继承实现)"""
# 对于无需做细粒度委托控制的策略,可以忽略onOrder
pass | freeitaly/Trading-System | vn.trader/ctaAlgo/strategyTickBreaker.py | Python | mit | 8,133 | 0.00262 |
"""sandbox URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from library import views as library_views
router = routers.DefaultRouter()
router.register('authors', library_views.AuthorViewSet)
router.register('books', library_views.BookViewSet)
urlpatterns = [
url(r'^', include('library.urls')),
url(r'^api/', include(router.urls)),
url(r'^admin/', admin.site.urls),
]
| nshafer/django-hashid-field | sandbox/sandbox/urls.py | Python | mit | 1,076 | 0 |
from datetime import timedelta
from django.db.models import Sum
from django.utils.duration import duration_string
from rest_framework_json_api.serializers import (
CharField,
ModelSerializer,
SerializerMethodField,
)
from timed.projects.models import Project
from timed.tracking.models import Report
from .models import Order, Package
class SubscriptionProjectSerializer(ModelSerializer):
purchased_time = SerializerMethodField(source="get_purchased_time")
spent_time = SerializerMethodField(source="get_spent_time")
def get_purchased_time(self, obj):
"""
Calculate purchased time for given project.
Only acknowledged hours are included.
"""
orders = Order.objects.filter(project=obj, acknowledged=True)
data = orders.aggregate(purchased_time=Sum("duration"))
return duration_string(data["purchased_time"] or timedelta(0))
def get_spent_time(self, obj):
"""
Calculate spent time for given project.
Reports which are not billable or are in review are excluded.
"""
reports = Report.objects.filter(
task__project=obj, not_billable=False, review=False
)
data = reports.aggregate(spent_time=Sum("duration"))
return duration_string(data["spent_time"] or timedelta())
included_serializers = {
"billing_type": "timed.projects.serializers.BillingTypeSerializer",
"cost_center": "timed.projects.serializers.CostCenterSerializer",
"customer": "timed.projects.serializers.CustomerSerializer",
"orders": "timed.subscription.serializers.OrderSerializer",
}
class Meta:
model = Project
resource_name = "subscription-projects"
fields = (
"name",
"billing_type",
"cost_center",
"purchased_time",
"spent_time",
"customer",
"orders",
)
class PackageSerializer(ModelSerializer):
price = CharField()
"""CharField needed as it includes currency."""
included_serializers = {
"billing_type": "timed.projects.serializers.BillingTypeSerializer"
}
class Meta:
model = Package
resource_name = "subscription-packages"
fields = ("duration", "price", "billing_type")
class OrderSerializer(ModelSerializer):
included_serializers = {
"project": ("timed.subscription.serializers" ".SubscriptionProjectSerializer")
}
class Meta:
model = Order
resource_name = "subscription-orders"
fields = ("duration", "acknowledged", "ordered", "project")
| adfinis-sygroup/timed-backend | timed/subscription/serializers.py | Python | agpl-3.0 | 2,642 | 0.000379 |
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Erik Zaadi <erikz@il.ibm.com>
# Avishay Traeger <avishay@il.ibm.com>
import copy
from mox3 import mox
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import xiv_ds8k
from cinder.volume import volume_types
FAKE = "fake"
CANNOT_DELETE = "Can not delete"
TOO_BIG_VOLUME_SIZE = 12000
POOL_SIZE = 100
CONSISTGROUP_ID = 1
VOLUME = {'size': 16,
'name': FAKE,
'id': 1,
'consistencygroup_id': CONSISTGROUP_ID,
'status': 'available'}
MANAGED_FAKE = "managed_fake"
MANAGED_VOLUME = {'size': 16,
'name': MANAGED_FAKE,
'id': 2}
REPLICA_FAKE = "repicated_fake"
REPLICATED_VOLUME = {'size': 64,
'name': REPLICA_FAKE,
'id': 2}
CONTEXT = {}
CONSISTGROUP = {'id': CONSISTGROUP_ID, }
CG_SNAPSHOT_ID = 1
CG_SNAPSHOT = {'id': CG_SNAPSHOT_ID,
'consistencygroup_id': CONSISTGROUP_ID}
CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
CONF = cfg.CONF
class XIVDS8KFakeProxyDriver(object):
"""Fake IBM XIV and DS8K Proxy Driver."""
def __init__(self, xiv_ds8k_info, logger, expt, driver=None):
"""Initialize Proxy."""
self.xiv_ds8k_info = xiv_ds8k_info
self.logger = logger
self.exception = expt
self.xiv_ds8k_portal = \
self.xiv_ds8k_iqn = FAKE
self.volumes = {}
self.snapshots = {}
self.driver = driver
def setup(self, context):
if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\
.configuration.san_login:
raise self.exception.NotAuthorized()
if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\
.configuration.san_ip:
raise self.exception.HostNotFound(host='fake')
def create_volume(self, volume):
if volume['size'] > POOL_SIZE:
raise self.exception.VolumeBackendAPIException(data='blah')
self.volumes[volume['name']] = volume
def volume_exists(self, volume):
return self.volumes.get(volume['name'], None) is not None
def delete_volume(self, volume):
if self.volumes.get(volume['name'], None) is not None:
del self.volumes[volume['name']]
def manage_volume_get_size(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return self.volumes[existing_ref['source-name']]['size']
def manage_volume(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
volume['size'] = MANAGED_VOLUME['size']
return {}
def unmanage_volume(self, volume):
pass
def initialize_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
lun_id = volume['id']
self.volumes[volume['name']]['attached'] = connector
return {'driver_volume_type': 'iscsi',
                'data': {'target_discovered': True,
'target_portal': self.xiv_ds8k_portal,
'target_iqn': self.xiv_ds8k_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
'provider_location': "%s,1 %s %s" % (
self.xiv_ds8k_portal,
self.xiv_ds8k_iqn,
lun_id), },
}
def terminate_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
if not self.is_volume_attached(volume, connector):
raise self.exception.NotFound(_('Volume not found for '
'instance %(instance_id)s.')
% {'instance_id': 'fake'})
del self.volumes[volume['name']]['attached']
def is_volume_attached(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return (self.volumes[volume['name']].get('attached', None)
== connector)
def reenable_replication(self, context, volume):
model_update = {}
if volume['replication_status'] == 'inactive':
model_update['replication_status'] = 'active'
elif volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
model_update['replication_extended_status'] = 'some_status'
model_update['replication_driver_data'] = 'some_data'
return model_update
def get_replication_status(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'active'}
def promote_replica(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'inactive'}
def create_replica_test_volume(self, volume, src_vref):
if volume['size'] != src_vref['size']:
raise exception.InvalidVolume(
reason="Target and source volumes have different size.")
return
def retype(self, ctxt, volume, new_type, diff, host):
volume['easytier'] = new_type['extra_specs']['easytier']
return True, volume
def create_consistencygroup(self, ctxt, group):
volumes = [volume for k, volume in self.volumes.items()
if volume['consistencygroup_id'] == group['id']]
if volumes:
raise exception.CinderException(
message='The consistency group id of volume may be wrong.')
return {'status': 'available'}
def delete_consistencygroup(self, ctxt, group):
volumes = []
for volume in self.volumes.values():
if (group.get('id', None)
== volume.get('consistencygroup_id', None)):
if volume['name'] == CANNOT_DELETE:
raise exception.VolumeBackendAPIException(
message='Volume can not be deleted')
else:
volume['status'] = 'deleted'
volumes.append(volume)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== group.get('id', None))}
# Delete volume in consistency group
self.volumes = {k: vol for k, vol in self.volumes.items()
if not(vol.get('consistencygroup_id', None)
== group.get('id', None))}
return {'status': 'deleted'}, volumes
def create_cgsnapshot(self, ctxt, cgsnapshot):
snapshots = []
for volume in self.volumes.values():
if (cgsnapshot.get('consistencygroup_id', None)
== volume.get('consistencygroup_id', None)):
if volume['size'] > POOL_SIZE / 2:
raise self.exception.VolumeBackendAPIException(data='blah')
snapshot = copy.deepcopy(volume)
snapshot['name'] = CANNOT_DELETE \
if snapshot['name'] == CANNOT_DELETE \
else snapshot['name'] + 'Snapshot'
snapshot['status'] = 'available'
snapshot['cgsnapshot_id'] = cgsnapshot.get('id', None)
snapshot['consistencygroup_id'] = \
cgsnapshot.get('consistencygroup_id', None)
self.snapshots[snapshot['name']] = snapshot
snapshots.append(snapshot)
return {'status': 'available'}, snapshots
def delete_cgsnapshot(self, ctxt, cgsnapshot):
snapshots = []
for snapshot in self.snapshots.values():
if (cgsnapshot.get('id', None)
== snapshot.get('cgsnapshot_id', None)):
if snapshot['name'] == CANNOT_DELETE:
raise exception.VolumeBackendAPIException(
message='Snapshot can not be deleted')
else:
snapshot['status'] = 'deleted'
snapshots.append(snapshot)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== cgsnapshot.get('cgsnapshot_id', None))}
return {'status': 'deleted'}, snapshots
class XIVDS8KVolumeDriverTest(test.TestCase):
"""Test IBM XIV and DS8K volume driver."""
def setUp(self):
"""Initialize IBM XIV and DS8K Driver."""
super(XIVDS8KVolumeDriverTest, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.san_is_local = False
configuration.xiv_ds8k_proxy = \
'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver'
configuration.xiv_ds8k_connection_type = 'iscsi'
configuration.xiv_chap = 'disabled'
configuration.san_ip = FAKE
configuration.management_ips = FAKE
configuration.san_login = FAKE
configuration.san_clustername = FAKE
configuration.san_password = FAKE
configuration.append_config_values(mox.IgnoreArg())
self.driver = xiv_ds8k.XIVDS8KDriver(
configuration=configuration)
def test_initialized_should_set_xiv_ds8k_info(self):
"""Test that the san flags are passed to the IBM proxy."""
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'],
self.driver.configuration.san_login)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'],
self.driver.configuration.san_password)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'],
self.driver.configuration.san_ip)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'],
self.driver.configuration.san_clustername)
def test_setup_should_fail_if_credentials_are_invalid(self):
"""Test that the xiv_ds8k_proxy validates credentials."""
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid'
self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None)
def test_setup_should_fail_if_connection_is_invalid(self):
"""Test that the xiv_ds8k_proxy validates connection."""
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \
'invalid'
self.assertRaises(exception.HostNotFound, self.driver.do_setup, None)
def test_create_volume(self):
"""Test creating a volume."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
self.assertTrue(has_volume)
self.driver.delete_volume(VOLUME)
def test_volume_exists(self):
"""Test the volume exist method with a volume that doesn't exist."""
self.driver.do_setup(None)
self.assertFalse(
self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE})
)
def test_delete_volume(self):
"""Verify that a volume is deleted."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.delete_volume(VOLUME)
has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
self.assertFalse(has_volume)
def test_delete_volume_should_fail_for_not_existing_volume(self):
"""Verify that deleting a non-existing volume is OK."""
self.driver.do_setup(None)
self.driver.delete_volume(VOLUME)
def test_create_volume_should_fail_if_no_pool_space_left(self):
"""Vertify that the xiv_ds8k_proxy validates volume pool space."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
{'name': FAKE,
'id': 1,
'size': TOO_BIG_VOLUME_SIZE})
def test_initialize_connection(self):
"""Test that inititialize connection attaches volume to host."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.assertTrue(
self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR))
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver.delete_volume(VOLUME)
def test_initialize_connection_should_fail_for_non_existing_volume(self):
"""Verify that initialize won't work for non-existing volume."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection,
VOLUME,
CONNECTOR)
def test_terminate_connection(self):
"""Test terminating a connection."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached(
VOLUME,
CONNECTOR))
self.driver.delete_volume(VOLUME)
def test_terminate_connection_should_fail_on_non_existing_volume(self):
"""Test that terminate won't work for non-existing volumes."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection,
VOLUME,
CONNECTOR)
def test_manage_existing_get_size(self):
"""Test that manage_existing_get_size returns the expected size. """
self.driver.do_setup(None)
self.driver.create_volume(MANAGED_VOLUME)
existing_ref = {'source-name': MANAGED_VOLUME['name']}
return_size = self.driver.manage_existing_get_size(
VOLUME,
existing_ref)
self.assertEqual(return_size, MANAGED_VOLUME['size'])
# cover both case, whether driver renames the volume or not
self.driver.delete_volume(VOLUME)
self.driver.delete_volume(MANAGED_VOLUME)
def test_manage_existing_get_size_should_fail_on_non_existing_volume(self):
"""Test that manage_existing_get_size fails on non existing volume. """
self.driver.do_setup(None)
# on purpose - do NOT create managed volume
existing_ref = {'source-name': MANAGED_VOLUME['name']}
self.assertRaises(exception.VolumeNotFound,
self.driver.manage_existing_get_size,
VOLUME,
existing_ref)
def test_manage_existing(self):
"""Test that manage_existing returns successfully. """
self.driver.do_setup(None)
self.driver.create_volume(MANAGED_VOLUME)
existing_ref = {'source-name': MANAGED_VOLUME['name']}
self.driver.manage_existing(VOLUME, existing_ref)
self.assertEqual(VOLUME['size'], MANAGED_VOLUME['size'])
# cover both case, whether driver renames the volume or not
self.driver.delete_volume(VOLUME)
self.driver.delete_volume(MANAGED_VOLUME)
def test_manage_existing_should_fail_on_non_existing_volume(self):
"""Test that manage_existing fails on non existing volume. """
self.driver.do_setup(None)
# on purpose - do NOT create managed volume
existing_ref = {'source-name': MANAGED_VOLUME['name']}
self.assertRaises(exception.VolumeNotFound,
self.driver.manage_existing,
VOLUME,
existing_ref)
def test_reenable_replication(self):
"""Test that reenable_replication returns successfully. """
self.driver.do_setup(None)
# assume the replicated volume is inactive
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
replicated_volume['replication_status'] = 'inactive'
model_update = self.driver.reenable_replication(
CONTEXT,
replicated_volume
)
self.assertEqual(
model_update['replication_status'],
'active'
)
self.assertTrue('replication_extended_status' in model_update)
self.assertTrue('replication_driver_data' in model_update)
def test_reenable_replication_fail_on_cinder_exception(self):
"""Test that reenable_replication fails on driver raising exception."""
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# on purpose - set invalid value to replication_status
# expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val'
self.assertRaises(
exception.CinderException,
self.driver.reenable_replication,
CONTEXT,
replicated_volume
)
def test_get_replication_status(self):
"""Test that get_replication_status return successfully. """
self.driver.do_setup(None)
# assume the replicated volume is inactive
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
replicated_volume['replication_status'] = 'inactive'
model_update = self.driver.get_replication_status(
CONTEXT,
replicated_volume
)
self.assertEqual(
model_update['replication_status'],
'active'
)
def test_get_replication_status_fail_on_exception(self):
"""Test that get_replication_status fails on exception"""
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# on purpose - set invalid value to replication_status
# expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val'
self.assertRaises(
exception.CinderException,
self.driver.get_replication_status,
CONTEXT,
replicated_volume
)
def test_promote_replica(self):
"""Test that promote_replica returns successfully. """
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# assume the replication_status should be active
replicated_volume['replication_status'] = 'active'
model_update = self.driver.promote_replica(
CONTEXT,
replicated_volume
)
# after promoting, replication_status should be inactive
self.assertEqual(
model_update['replication_status'],
'inactive'
)
def test_promote_replica_fail_on_cinder_exception(self):
"""Test that promote_replica fails on CinderException. """
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# on purpose - set invalid value to replication_status
# expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val'
self.assertRaises(
exception.CinderException,
self.driver.promote_replica,
CONTEXT,
replicated_volume
)
def test_create_replica_test_volume(self):
"""Test that create_replica_test_volume returns successfully."""
self.driver.do_setup(None)
tgt_volume = copy.deepcopy(VOLUME)
src_volume = copy.deepcopy(REPLICATED_VOLUME)
tgt_volume['size'] = src_volume['size']
model_update = self.driver.create_replica_test_volume(
tgt_volume,
src_volume
)
self.assertTrue(model_update is None)
def test_create_replica_test_volume_fail_on_diff_size(self):
"""Test that create_replica_test_volume fails on diff size."""
self.driver.do_setup(None)
tgt_volume = copy.deepcopy(VOLUME)
src_volume = copy.deepcopy(REPLICATED_VOLUME)
self.assertRaises(
exception.InvalidVolume,
self.driver.create_replica_test_volume,
tgt_volume,
src_volume
)
def test_retype(self):
"""Test that retype returns successfully."""
self.driver.do_setup(None)
# prepare parameters
ctxt = context.get_admin_context()
host = {
'host': 'foo',
'capabilities': {
'location_info': 'xiv_ds8k_fake_1',
'extent_size': '1024'
}
}
key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(
ctxt,
old_type_ref['id'],
new_type_ref['id'],
)
volume = copy.deepcopy(VOLUME)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
ret = self.driver.retype(ctxt, volume, new_type, diff, host)
self.assertTrue(ret)
self.assertTrue(volume['easytier'])
def test_retype_fail_on_exception(self):
"""Test that retype fails on exception."""
self.driver.do_setup(None)
# prepare parameters
ctxt = context.get_admin_context()
host = {
'host': 'foo',
'capabilities': {
'location_info': 'xiv_ds8k_fake_1',
'extent_size': '1024'
}
}
key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new')
diff, equal = volume_types.volume_types_diff(
ctxt,
old_type_ref['id'],
new_type_ref['id'],
)
volume = copy.deepcopy(VOLUME)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
self.assertRaises(
KeyError,
self.driver.retype,
ctxt, volume, new_type, diff, host
)
def test_create_consistencygroup(self):
"""Test that create_consistencygroup return successfully."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
self.assertEqual('available',
model_update['status'],
"Consistency Group created failed")
def test_create_consistencygroup_fail_on_cg_not_empty(self):
"""Test create_consistencygroup with empty consistency group."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create volumes
# And add the volumes into the consistency group before creating cg
self.driver.create_volume(VOLUME)
self.assertRaises(exception.CinderException,
self.driver.create_consistencygroup,
ctxt, CONSISTGROUP)
def test_delete_consistencygroup(self):
"""Test that delete_consistencygroup return successfully."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
# Create volumes and add them to consistency group
self.driver.create_volume(VOLUME)
# Delete consistency group
model_update, volumes = \
self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
# Verify the result
self.assertEqual('deleted',
model_update['status'],
'Consistency Group deleted failed')
for volume in volumes:
self.assertEqual('deleted',
volume['status'],
'Consistency Group deleted failed')
def test_delete_consistencygroup_fail_on_volume_not_delete(self):
"""Test delete_consistencygroup with volume delete failure."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
# Set the volume not to be deleted
volume = copy.deepcopy(VOLUME)
volume['name'] = CANNOT_DELETE
# Create volumes and add them to consistency group
self.driver.create_volume(volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_consistencygroup,
ctxt, CONSISTGROUP)
def test_create_cgsnapshot(self):
"""Test that create_cgsnapshot return successfully."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
# Create volumes and add them to consistency group
self.driver.create_volume(VOLUME)
# Create consistency group snapshot
model_update, snapshots = \
self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT)
# Verify the result
self.assertEqual('available',
model_update['status'],
'Consistency Group Snapshot created failed')
for snap in snapshots:
self.assertEqual('available',
snap['status'])
# Clean the environment
self.driver.delete_cgsnapshot(ctxt, CG_SNAPSHOT)
self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
def test_create_cgsnapshot_fail_on_no_pool_space_left(self):
"""Test that create_cgsnapshot return fail when no pool space left."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
# Set the volume size
volume = copy.deepcopy(VOLUME)
volume['size'] = POOL_SIZE / 2 + 1
# Create volumes and add them to consistency group
self.driver.create_volume(volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cgsnapshot,
ctxt, CG_SNAPSHOT)
# Clean the environment
self.driver.volumes = None
self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
def test_delete_cgsnapshot(self):
"""Test that delete_cgsnapshot return successfully."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
# Create volumes and add them to consistency group
self.driver.create_volume(VOLUME)
# Create consistency group snapshot
self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT)
# Delete consistency group snapshot
model_update, snapshots = \
self.driver.delete_cgsnapshot(ctxt, CG_SNAPSHOT)
# Verify the result
self.assertEqual('deleted',
model_update['status'],
'Consistency Group Snapshot deleted failed')
for snap in snapshots:
self.assertEqual('deleted',
snap['status'])
# Clean the environment
self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
def test_delete_cgsnapshot_fail_on_snapshot_not_delete(self):
"""Test delete_cgsnapshot when the snapshot cannot be deleted."""
self.driver.do_setup(None)
ctxt = context.get_admin_context()
# Create consistency group
self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
# Set the snapshot not to be deleted
volume = copy.deepcopy(VOLUME)
volume['name'] = CANNOT_DELETE
# Create volumes and add them to consistency group
self.driver.create_volume(volume)
# Create consistency group snapshot
self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_cgsnapshot,
ctxt, CG_SNAPSHOT)
| tlakshman26/cinder-https-changes | cinder/tests/unit/test_ibm_xiv_ds8k.py | Python | apache-2.0 | 30,480 | 0 |
"""
Pseudo code
Breadth-First-Search(Graph, root):
create empty set S
create empty queue Q
root.parent = NIL
Q.enqueue(root)
while Q is not empty:
current = Q.dequeue()
if current is the goal:
return current
for each node n that is adjacent to current:
if n is not in S:
add n to S
n.parent = current
Q.enqueue(n)
Implementation
"""
from collections import deque
from directional_graph import Graph
def BFS(Graph, s):
graph = Graph.graph()
if s not in graph:
raise Exception("Edge %s not in graph" % s)
q = deque([s])
visited = set([s])
while len(q) != 0:
        node = q.popleft()  # dequeue from the left (FIFO) so the traversal is breadth-first
        for each in graph[node]:
if each not in visited:
visited.add(each)
q.append(each)
return visited
if __name__ == "__main__":
g = {
"a": {"d": 4},
"b": {"c": 2},
"c": {"b": 2, "c": 5, "d": 1, "e": 7},
"d": {"a": 4, "c": 1},
"e": {"c": 7}
}
graph = Graph(g)
print("Vertices of graph:")
print(graph.list_vertices())
print("\nEdges of graph:")
print(graph.list_edges())
print("\nAdding a vertice")
graph.add_vertex("g")
print (graph.list_vertices())
graph.add_edge(("g", "a"))
graph.add_edge(("a", "c"))
graph.add_edge(("g", "c"))
print("\nEdges of graph:")
print(graph.list_edges())
print (graph.list_vertices())
print(graph.graph())
print(graph.has_edge(("a", "c")))
print(graph.graph())
print("\nDeleting edge (a, d):")
graph.delete_edge(("a", "d"))
print(graph.list_edges())
print (graph.list_vertices())
print(graph.graph())
# print("\nDeleting vertex a:")
# graph.delete_vertex("a")
print (graph.list_vertices())
print(graph.list_edges())
print(graph.graph())
print("\nPath between b to e:")
print(graph.find_path("b", "e"))
print("\nSetting edge weight for (c, e):")
graph.set_edge_weight(("c", "e"), 2)
print(graph.graph())
print '\n'
print (BFS(graph, 'e'))
| codervikash/algorithms | Python/Graphs/breath_first_traversal.py | Python | mit | 2,188 | 0.002285 |
from util.arguments import Arguments
from discord.ext import commands
from shlex import split
import random
class Choices:
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['choose'], description='Randomly picks a 1 of the given choices.')
async def choices(self, *, msg):
parser = Arguments(allow_abbrev=False, prog='choices')
parser.add_argument('choices', nargs='+', help='The choices to randomly pick from.')
try:
args = parser.parse_args(split(msg))
except SystemExit:
await self.bot.say('```%s```' % parser.format_help())
return
except Exception as e:
await self.bot.say('```%s```' % str(e))
return
choice = args.choices[random.SystemRandom().randint(0, len(args.choices) - 1)]
await self.bot.say('**%s** has randomly been selected.' % choice)
def setup(bot):
bot.add_cog(Choices(bot))
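# Example invocation in chat (assumed command prefix '!'):
#   !choices "order pizza" "cook at home" sushi
# The message is split shell-style (shlex.split), so multi-word choices must
# be quoted; one entry is then picked with random.SystemRandom().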
| duke605/RunePy | commands/choices.py | Python | mit | 957 | 0.003135 |
import logging
import socket
from . import arcade
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class Switch:
remote_ip = None
remote_port = 9999
state = None
commands = {'info': '{"system":{"get_sysinfo":{}}}',
'on': u'{"system":{"set_relay_state":{"state":1}}}',
'off': '{"system":{"set_relay_state":{"state":0}}}',
'cloudinfo': '{"cnCloud":{"get_info":{}}}',
'wlanscan': '{"netif":{"get_scaninfo":{"refresh":0}}}',
'time': '{"time":{"get_time":{}}}',
'schedule': '{"schedule":{"get_rules":{}}}',
'countdown': '{"count_down":{"get_rules":{}}}',
'antitheft': '{"anti_theft":{"get_rules":{}}}',
'reboot': '{"system":{"reboot":{"delay":1}}}',
'reset': '{"system":{"reset":{"delay":1}}}'
}
def __init__(self, server, port=80):
self.remote_ip = server
self.remote_port = int(port)
def activate(self):
self.switch_requester(self.commands.get('on'))
def deactivate(self):
self.switch_requester(self.commands.get('off'))
def info(self):
self.switch_requester(self.commands.get('info'))
def switch_requester(self, content=None):
if content is None:
print("Fail")
return False
else:
try:
sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_tcp.connect((self.remote_ip, self.remote_port))
print("Sending: ", content)
# sock_tcp.send(bytes(self.encrypt(content), 'utf8'))
sock_tcp.send(self.encrypt(content).encode('utf8'))
data = sock_tcp.recv(2048)
sock_tcp.close()
print("Sent: ", content)
print("Received: ", str(self.decrypt(data[4:])))
except socket.error:
return False
return False
def encrypt(self, string):
key = 171
result = "\0\0\0\0"
for i in string:
a = key ^ ord(i)
key = a
result += chr(a)
return result
def decrypt(self, string):
key = 171
result = ""
string = string.decode('utf8')
for i in string:
i = str(i)
a = key ^ ord(i)
key = ord(i)
result += chr(a)
return result
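# Minimal usage sketch (the address below is a placeholder for a TP-Link style
# plug on the local network; port 9999 matches the class-level default and the
# XOR-autokey protocol implemented above):
#
#   plug = Switch("192.168.0.50", 9999)
#   plug.info()        # prints the decrypted sysinfo reply
#   plug.activate()    # relay on
#   plug.deactivate()  # relay off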
| droberin/blackhouse | blackhouse/__init__.py | Python | mit | 2,518 | 0.000397 |
import ctypes
class _DpxGenericHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('Magic', ctypes.c_char * 4),
('ImageOffset', ctypes.c_uint32),
('Version', ctypes.c_char * 8),
('FileSize', ctypes.c_uint32),
('DittoKey', ctypes.c_uint32),
('GenericSize', ctypes.c_uint32),
('IndustrySize', ctypes.c_uint32),
('UserSize', ctypes.c_uint32),
('FileName', ctypes.c_char * 100),
('TimeDate', ctypes.c_char * 24),
('Creator', ctypes.c_char * 100),
('Project', ctypes.c_char * 200),
('Copyright', ctypes.c_char * 200),
('EncryptKey', ctypes.c_uint32),
('Reserved', ctypes.c_char * 104)
]
class _DpxGenericImageElementBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('DataSign', ctypes.c_uint32),
('LowData', ctypes.c_int32),
('LowQuantity', ctypes.c_float),
('HighData', ctypes.c_int32),
('HighQuantity', ctypes.c_float),
('Descriptor', ctypes.c_byte),
('Transfer', ctypes.c_byte),
('Colorimetric', ctypes.c_byte),
('BitSize', ctypes.c_byte),
('Packing', ctypes.c_uint16),
('Encoding', ctypes.c_uint16),
('DataOffset', ctypes.c_uint32),
('EndOfLinePadding', ctypes.c_uint32),
('EndOfImagePadding', ctypes.c_uint32),
('Description', ctypes.c_char * 32)
]
class _DpxGenericImageHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('Orientation', ctypes.c_uint16),
('NumberElements', ctypes.c_uint16),
('PixelsPerLine', ctypes.c_uint32),
('LinesPerElement', ctypes.c_uint32),
('ImageElement', _DpxGenericImageElementBigEndian * 8),
('Reserved', ctypes.c_char * 52)
]
class _DpxGenericOrientationHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('XOffset', ctypes.c_uint32),
('YOffset', ctypes.c_uint32),
('XCenter', ctypes.c_float),
('YCenter', ctypes.c_float),
('XOriginalSize', ctypes.c_uint32),
('YOriginalSize', ctypes.c_uint32),
('FileName', ctypes.c_char * 100),
('TimeDate', ctypes.c_char * 24),
('InputName', ctypes.c_char * 32),
('InputSN', ctypes.c_char * 32),
('Border', ctypes.c_uint16 * 4),
('AspectRatio', ctypes.c_uint32 * 2),
('Reserved', ctypes.c_byte * 28)
]
class _DpxIndustryFilmInfoHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('FilmMfgId', ctypes.c_char * 2),
('FilmType', ctypes.c_char * 2),
('Offset', ctypes.c_char * 2),
('Prefix', ctypes.c_char * 6),
('Count', ctypes.c_char * 4),
('Format', ctypes.c_char * 32),
('FramePosition', ctypes.c_uint32),
('SequenceLen', ctypes.c_int32),
('HeldCount', ctypes.c_int32),
('FrameRate', ctypes.c_float),
('ShutterAngle', ctypes.c_float),
('FrameId', ctypes.c_char * 32),
('SlateInfo', ctypes.c_char * 100),
('Reserved', ctypes.c_byte * 56)
]
class _DpxIndustryTelevisionInfoHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('TimeCode', ctypes.c_uint32),
('UserBits', ctypes.c_uint32),
('Interlace', ctypes.c_byte),
('FieldNumber', ctypes.c_byte),
('VideoSignal', ctypes.c_byte),
('Padding', ctypes.c_byte),
('HorzSampleRate', ctypes.c_float),
('VertSampleRate', ctypes.c_float),
('FrameRate', ctypes.c_float),
('TimeOffset', ctypes.c_float),
('Gamma', ctypes.c_float),
('BlackLevel', ctypes.c_float),
('BlackGain', ctypes.c_float),
('Breakpoint', ctypes.c_float),
('WhiteLevel', ctypes.c_float),
('IntegrationTimes', ctypes.c_float),
('Reserved', ctypes.c_byte * 76)
]
class DpxHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('FileHeader', _DpxGenericHeaderBigEndian),
('ImageHeader', _DpxGenericImageHeaderBigEndian),
('OrientHeader', _DpxGenericOrientationHeaderBigEndian),
('FilmHeader', _DpxIndustryFilmInfoHeaderBigEndian),
('TvHeader', _DpxIndustryTelevisionInfoHeaderBigEndian)
]
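# Minimal usage sketch (the file name is a placeholder; a DPX file is
# big-endian when its magic bytes read "SDPX"):
#
#   with open("example.dpx", "rb") as f:
#       raw = f.read(ctypes.sizeof(DpxHeaderBigEndian))
#   header = DpxHeaderBigEndian.from_buffer_copy(raw)
#   print(header.FileHeader.Magic, header.ImageHeader.PixelsPerLine)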
| plinecom/pydpx_meta | pydpx_meta/low_header_big_endian.py | Python | mit | 4,246 | 0 |
import random
from gatesym import core, gates, test_utils
from gatesym.blocks import latches
def test_gated_d_latch():
network = core.Network()
clock = gates.Switch(network)
data = gates.Switch(network)
latch = latches.gated_d_latch(data, clock)
network.drain()
assert not latch.read()
data.write(True)
network.drain()
assert not latch.read()
clock.write(True)
network.drain()
assert latch.read()
data.write(False)
network.drain()
assert not latch.read()
def test_ms_d_flop_basic():
network = core.Network()
clock = gates.Switch(network)
data = gates.Switch(network)
flop = latches.ms_d_flop(data, clock)
network.drain()
assert not flop.read()
# clock a 1 through
data.write(True)
network.drain()
assert not flop.read()
clock.write(True)
network.drain()
assert not flop.read()
clock.write(False)
network.drain()
assert flop.read()
# and back to 0
data.write(False)
network.drain()
assert flop.read()
clock.write(True)
network.drain()
assert flop.read()
clock.write(False)
network.drain()
assert not flop.read()
def test_ms_d_flop_timing():
network = core.Network()
clock = gates.Switch(network)
data = gates.Switch(network)
flop = latches.ms_d_flop(data, clock)
network.drain()
assert not flop.read()
# clock a 1 through
data.write(True)
network.drain()
assert not flop.read() # data has no impact
clock.write(True)
network.drain()
assert not flop.read() # clock high data in
clock.write(False)
data.write(False)
network.drain()
assert flop.read() # clock low stored data out
# and back to 0
data.write(False)
network.drain()
assert flop.read() # data has no impact
clock.write(True)
network.drain()
assert flop.read() # clock high data in
clock.write(False)
data.write(True)
network.drain()
assert not flop.read() # clock low stored data out
def test_register():
network = core.Network()
clock = gates.Switch(network)
data = test_utils.BinaryIn(network, 8)
register = latches.register(data, clock)
res = test_utils.BinaryOut(register)
network.drain()
assert res.read() == 0
# clock a value through
v1 = random.randrange(256)
data.write(v1)
network.drain()
assert res.read() == 0
clock.write(True)
network.drain()
assert res.read() == 0
clock.write(False)
network.drain()
assert res.read() == v1
# and a different value
v2 = random.randrange(256)
data.write(v2)
network.drain()
assert res.read() == v1
clock.write(True)
network.drain()
assert res.read() == v1
clock.write(False)
network.drain()
assert res.read() == v2
| tolomea/gatesym | gatesym/tests/blocks/test_latches.py | Python | mit | 2,835 | 0 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_wtf import FlaskForm # type: ignore
from wtforms import ( # type: ignore
StringField,
TextAreaField,
SubmitField,
FieldList,
FormField,
IntegerField,
HiddenField,
BooleanField,
)
from wtforms import validators
from data.models import VulnerabilityGitCommits, VulnerabilityResources
from data.models.base import db
class BaseForm(FlaskForm):
@property
def non_hidden_fields(self):
for field in self:
if isinstance(field, HiddenField):
continue
yield field
class ModelFieldList(FieldList):
def __init__(self, *args, **kwargs):
self.model = kwargs.pop("model", None)
super().__init__(*args, **kwargs)
if not self.model:
raise ValueError("ModelFieldList requires model to be set")
def populate_obj(self, obj, name):
if not hasattr(obj, name):
setattr(obj, name, [])
while len(getattr(obj, name)) < len(self.entries):
new_model = self.model()
db.session.add(new_model)
getattr(obj, name).append(new_model)
while len(getattr(obj, name)) > len(self.entries):
db.session.delete(getattr(obj, name).pop())
super().populate_obj(obj, name)
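# Worked example of ModelFieldList.populate_obj() above: if the submitted form
# carries three commit sub-forms while the vulnerability row currently owns one
# VulnerabilityGitCommits child, two fresh model instances are added to the
# session before population; with one sub-form and three children, the two
# surplus children are deleted. Only then does the stock FieldList copy the
# submitted values onto the (now equally sized) list of models.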
class CommitLinksForm(FlaskForm):
repo_url = StringField(
"Git Repo URL", validators=[validators.Optional(), validators.URL()]
)
commit_hash = StringField("Commit Hash", validators=[])
# Commit data is optional -> otherwise use: validators.DataRequired(),
commit_link = StringField(
"Main commit link", validators=[validators.Optional(), validators.URL()]
)
repo_name = StringField("Repository Name", validators=[])
class Meta:
csrf = False
class VulnerabilityResourcesForm(FlaskForm):
link = StringField("Link", validators=[validators.DataRequired(), validators.URL()])
class Meta:
csrf = False
class VulnerabilityDetailsForm(FlaskForm):
commits = ModelFieldList(
FormField(CommitLinksForm),
model=VulnerabilityGitCommits,
min_entries=1,
default=[VulnerabilityGitCommits],
)
# Changing the CVE ID is disabled for now.
# The filters argument is used to have Null fields instead of empty strings.
# This is important since the cve_id is supposed to be unique OR Null.
# cve_id = StringField(
# "CVE-ID",
# filters=[lambda x: x and str(x).upper().strip(), lambda x: x or None],
# validators=[
# validators.Optional(),
# validators.Regexp(r"^CVE-\d{4}-\d+$")
# ],
# )
comment = TextAreaField(
"High-Level Bug Overview", validators=[validators.DataRequired()]
)
resources = ModelFieldList(
FormField(VulnerabilityResourcesForm), model=VulnerabilityResources
)
submit = SubmitField("Propose change")
class VulnerabilityProposalReject(FlaskForm):
review_feedback = TextAreaField(
"Feedback what should be changed", validators=[validators.DataRequired()]
)
submit_reject = SubmitField("Ask for improvements")
class VulnerabilityProposalApprove(FlaskForm):
submit_approve = SubmitField("Approve proposal")
class VulnerabilityProposalAssign(FlaskForm):
submit_assign = SubmitField("Take review")
class VulnerabilityProposalUnassign(FlaskForm):
submit_unassign = SubmitField("Unassign from this review")
class VulnerabilityProposalPublish(FlaskForm):
submit_publish = SubmitField("Publish entry")
class VulnerabilityDeleteForm(FlaskForm):
delete_entry = IntegerField("Delete entry", [validators.DataRequired()])
submit = SubmitField()
class UserProfileForm(BaseForm):
full_name = StringField(
"Name",
description=(
'<small class="form-text text-muted">'
"What should be shown next to your contributions.</small>"
),
)
hide_name = BooleanField("Hide Name")
profile_picture = StringField(
"Profile Picture URL", validators=[validators.Optional(), validators.URL()]
)
hide_picture = BooleanField("Hide Profile Picture")
| google/vulncode-db | data/forms/__init__.py | Python | apache-2.0 | 4,727 | 0.001269 |
import socket, sys, os, hashlib, codecs, time  # stdlib modules for sockets, hashing and file I/O
#filecodec = 'cp037'
filecodec = None
buffersize = 1024
failed = False
def filehash(filepath):
openedFile = codecs.open(filepath,'rb',filecodec)
# readFile = openedFile.read().encode()
readFile = openedFile.read()
openedFile.close()
sha1Hash = hashlib.sha1(readFile)
sha1Hashed = sha1Hash.hexdigest()
return sha1Hashed
def namehash(strtohash):
sha1Hash = hashlib.sha1(strtohash.encode())
sha1Hashed = sha1Hash.hexdigest()
return sha1Hashed
c = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12345 # Reserve a port for your service.
connected = False
while not connected:
try:
c.connect((host, port))
connected = True
except Exception as ex:
print("An error occured when connecting: " + str(ex))
time.sleep(5)
try:
print('Connected to ', host)
gotfname = False
tries = 0
while not gotfname:
fnamehash = c.recv(buffersize).decode()
c.send("Next".encode())
fname = c.recv(buffersize).decode()
tmphash = namehash(fname)
tries = tries + 1
if tmphash == fnamehash:
c.send("Y".encode())
gotfname = True
print("Filename Valid")
elif tries >= 5:
print("Filename Invalid")
c.send("N".encode())
print("An error occured when receiving the filename")
c.close()
else:
print("Filename Invalid")
print("Attempting to get the filename again ...")
print()
c.send("N".encode())
umoded = c.recv(buffersize).decode()
if umoded == "y":
umode = True
else:
umode = False
c.send("Start".encode())
exist = False
gothash = False
while not gothash:
cfhash = c.recv(buffersize).decode()
c.send(cfhash.encode())
returndata = c.recv(buffersize).decode()
if returndata == "y":
gothash = True
try:
if cfhash == filehash(fname):
exist = True
except:
pass
if not exist:
c.send("n".encode())
print("File not found or out of date, downloading new version...")
gotfile = False
tries = 0
while not (gotfile or failed):
try:
try:
os.remove(fname + ".tmp")
except:
pass
flen = int(c.recv(buffersize).decode())
c.send("Continue".encode())
fhash = c.recv(buffersize).decode()
f = codecs.open(fname + ".tmp",'wb',filecodec)
c.send("Ready.".encode())
print("Receiving file: " + fname)
print("File Length: " + str(flen) + " Chunk(s)")
flenc = 0
print()
while flenc < flen:
sys.stdout.write("\rReceiving Chunk " + str(flenc + 1) + "...")
# l = c.recv(buffersize).decode(filecodec)
l = c.recv(buffersize)
if (l):
f.write(l)
flenc = flenc + 1
f.close()
print("Done Receiving")
ofhash = filehash(fname + ".tmp")
tries = tries + 1
if ofhash == fhash:
print("File Valid")
c.send("Y".encode())
gotfile = True
elif tries >= 5:
print("File Invalid")
c.send("N".encode())
print("An error occured when receiving the file")
failed = True
c.close()
else:
print("File Invalid")
print("Attempting to restart the download...")
print()
c.send("N".encode())
except Exception as ex:
try:
f.close()
except:
pass
try:
c.send("N".encode())
except:
pass
print("An error occured when receiving the file: " + str(ex))
if not failed:
print("Saving File...")
if umode:
try:
os.remove(__file__)
except:
pass
try:
os.remove(fname)
except:
pass
os.rename(fname + ".tmp", fname)
print("Done Saving")
else:
c.send("y".encode())
print("File already exists and is up to date")
if not failed:
print(c.recv(buffersize).decode())
c.close()
if umode:
os.system(fname)
sys.exit()
except Exception as ex:
try:
c.close()
except:
pass
try:
f.close()
except:
pass
try:
os.remove(fname + ".tmp")
except:
pass
print("An error occured: " + str(ex))
input()
| TNT-Samuel/Coding-Projects | File Sending/V1.0/output/receivefile.py | Python | gpl-3.0 | 5,213 | 0.005179 |
# -*- coding: utf-8 -*-
import operator
import os
import re
import subprocess
import time
import urllib
from xml.dom.minidom import parseString as parse_xml
from module.network.CookieJar import CookieJar
from module.network.HTTPRequest import HTTPRequest
from ..internal.Hoster import Hoster
from ..internal.misc import exists, isexecutable, json, reduce, renice, replace_patterns, which
from ..internal.Plugin import Abort, Skip
class BIGHTTPRequest(HTTPRequest):
"""
Overcome HTTPRequest's load() size limit to allow
    loading very big web pages by overriding HTTPRequest's write() function
"""
# @TODO: Add 'limit' parameter to HTTPRequest in v0.4.10
def __init__(self, cookies=None, options=None, limit=2000000):
self.limit = limit
HTTPRequest.__init__(self, cookies=cookies, options=options)
def write(self, buf):
""" writes response """
if self.limit and self.rep.tell() > self.limit or self.abort:
rep = self.getResponse()
if self.abort:
raise Abort()
f = open("response.dump", "wb")
f.write(rep)
f.close()
raise Exception("Loaded Url exceeded limit")
self.rep.write(buf)
class Ffmpeg(object):
_RE_DURATION = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})\.(\d{2}),')
_RE_TIME = re.compile(r'time=(\d{2}):(\d{2}):(\d{2})\.(\d{2})')
    _RE_VERSION = re.compile(r'ffmpeg version (.+?) ')
CMD = None
priority = 0
streams = []
start_time = (0, 0)
output_filename = None
error_message = ""
def __init__(self, priority, plugin=None):
self.plugin = plugin
self.priority = priority
self.streams = []
self.start_time = (0, 0)
self.output_filename = None
self.error_message = ""
self.find()
@classmethod
def find(cls):
"""
Check for ffmpeg
"""
if cls.CMD is not None:
return True
try:
if os.name == "nt":
ffmpeg = os.path.join(pypath, "ffmpeg.exe") if isexecutable(os.path.join(pypath, "ffmpeg.exe")) \
else "ffmpeg.exe"
else:
ffmpeg = "ffmpeg"
cmd = which(ffmpeg) or ffmpeg
p = subprocess.Popen([cmd, "-version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = (_r.strip() if _r else "" for _r in p.communicate())
except OSError:
return False
m = cls._RE_VERSION.search(out)
if m is not None:
cls.VERSION = m.group(1)
cls.CMD = cmd
return True
@property
def found(self):
return self.CMD is not None
def add_stream(self, streams):
if isinstance(streams, list):
self.streams.extend(streams)
else:
self.streams.append(streams)
def set_start_time(self, start_time):
self.start_time = start_time
def set_output_filename(self, output_filename):
self.output_filename = output_filename
def run(self):
if self.CMD is None or self.output_filename is None:
return False
maps = []
args = []
meta = []
for i, stream in enumerate(self.streams):
args.extend(["-i", stream[1]])
maps.extend(["-map", "%s:%s:0" % (i, stream[0])])
if stream[0] == 's':
meta.extend(["-metadata:s:s:0:%s" % i, "language=%s" % stream[2]])
args.extend(maps)
args.extend(meta)
args.extend(["-y",
"-vcodec", "copy",
"-acodec", "copy",
"-scodec", "copy",
"-ss", "00:%s:%s.00" % (self.start_time[0], self.start_time[1]),
"-sub_charenc", "utf8"])
call = [self.CMD] + args + [self.output_filename]
p = subprocess.Popen(
call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
renice(p.pid, self.priority)
duration = self._find_duration(p)
if duration:
last_line = self._progress(p, duration)
else:
last_line = ""
out, err = (_r.strip() if _r else "" for _r in p.communicate())
if err or p.returncode:
self.error_message = last_line
return False
else:
self.error_message = ""
return True
def _find_duration(self, process):
duration = 0
while True:
line = process.stderr.readline() #: ffmpeg writes to stderr
#: Quit loop on eof
if not line:
break
m = self._RE_DURATION.search(line)
if m is not None:
duration = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
for i, v in enumerate(m.groups()))
break
return duration
def _progress(self, process, duration):
line = ""
last_line = ""
while True:
c = process.stderr.read(1) #: ffmpeg writes to stderr
#: Quit loop on eof
if not c:
break
elif c == "\r":
last_line = line.strip('\r\n')
line = ""
m = self._RE_TIME.search(last_line)
if m is not None:
current_time = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
for i, v in enumerate(m.groups()))
if self.plugin:
progress = current_time * 100 / duration
self.plugin.pyfile.setProgress(progress)
else:
line += c
continue
return last_line #: Last line may contain error message
class YoutubeCom(Hoster):
__name__ = "YoutubeCom"
__type__ = "hoster"
__version__ = "0.68"
__status__ = "testing"
__pattern__ = r'https?://(?:[^/]*\.)?(?:youtu\.be/|youtube\.com/watch\?(?:.*&)?v=)[\w\-]+'
__config__ = [("activated", "bool", "Activated", True),
("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;1440p;2160p;3072p;4320p", "Quality Setting", "hd"),
("vfmt", "int", "Video FMT/ITAG Number (0 for auto)", 0),
("afmt", "int", "Audio FMT/ITAG Number (0 for auto)", 0),
(".mp4", "bool", "Allow .mp4", True),
(".flv", "bool", "Allow .flv", True),
(".webm", "bool", "Allow .webm", True),
(".mkv", "bool", "Allow .mkv", True),
(".3gp", "bool", "Allow .3gp", False),
("aac", "bool", "Allow aac audio (DASH video only)", True),
("vorbis", "bool", "Allow vorbis audio (DASH video only)", True),
("opus", "bool", "Allow opus audio (DASH video only)", True),
("ac3", "bool", "Allow ac3 audio (DASH video only)", True),
("dts", "bool", "Allow dts audio (DASH video only)", True),
("3d", "bool", "Prefer 3D", False),
("subs_dl", "off;all_specified;first_available", "Download subtitles", "off"),
("subs_dl_langs", "str", "Subtitle language codes (ISO639-1) to download (comma separated)", ""),
("subs_embed", "bool", "Embed subtitles inside the output file (.mp4 and .mkv only)", False),
("priority", "int", "ffmpeg process priority", 0)]
__description__ = """Youtube.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "spoob@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
URL_REPLACEMENTS = [(r'youtu\.be/', 'youtube.com/watch?v=')]
#: Invalid characters that must be removed from the file name
invalid_chars = u'\u2605:?><"|\\'
#: name, width, height, quality ranking, 3D, type
formats = {
# 3gp
17: {'ext': ".3gp", 'width': 176, 'height': 144, 'qi': 0, '3d': False, 'type': "av"},
36: {'ext': ".3gp", 'width': 400, 'height': 240, 'qi': 1, '3d': False, 'type': "av"},
# flv
5: {'ext': ".flv", 'width': 400, 'height': 240, 'qi': 1, '3d': False, 'type': "av"},
6: {'ext': ".flv", 'width': 640, 'height': 400, 'qi': 4, '3d': False, 'type': "av"},
34: {'ext': ".flv", 'width': 640, 'height': 360, 'qi': 4, '3d': False, 'type': "av"},
35: {'ext': ".flv", 'width': 854, 'height': 480, 'qi': 6, '3d': False, 'type': "av"},
# mp4
83: {'ext': ".mp4", 'width': 400, 'height': 240, 'qi': 1, '3d': True, 'type': "av"},
18: {'ext': ".mp4", 'width': 480, 'height': 360, 'qi': 2, '3d': False, 'type': "av"},
82: {'ext': ".mp4", 'width': 640, 'height': 360, 'qi': 3, '3d': True, 'type': "av"},
22: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': False, 'type': "av"},
136: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': False, 'type': "v"},
84: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': True, 'type': "av"},
37: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "av"},
137: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "v"},
85: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': True, 'type': "av"},
264: {'ext': ".mp4", 'width': 2560, 'height': 1440, 'qi': 10, '3d': False, 'type': "v"},
266: {'ext': ".mp4", 'width': 3840, 'height': 2160, 'qi': 11, '3d': False, 'type': "v"},
38: {'ext': ".mp4", 'width': 4096, 'height': 3072, 'qi': 12 , '3d': False, 'type': "av"},
# webm
43: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 3, '3d': False, 'type': "av"},
100: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 3, '3d': True, 'type': "av"},
101: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 4, '3d': True, 'type': "av"},
44: {'ext': ".webm", 'width': 854, 'height': 480, 'qi': 5, '3d': False, 'type': "av"},
45: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 7, '3d': False, 'type': "av"},
247: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 7, '3d': False, 'type': "v"},
102: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 8, '3d': True, 'type': "av"},
46: {'ext': ".webm", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "av"},
248: {'ext': ".webm", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "v"},
271: {'ext': ".webm", 'width': 2560, 'height': 1440, 'qi': 10, '3d': False, 'type': "v"},
313: {'ext': ".webm", 'width': 3840, 'height': 2160, 'qi': 11, '3d': False, 'type': "v"},
272: {'ext': ".webm", 'width': 7680, 'height': 4320, 'qi': 13, '3d': False, 'type': "v"},
# audio
139: {'ext': ".mp4", 'qi': 1, 'acodec': "aac", 'type': "a"},
140: {'ext': ".mp4", 'qi': 2, 'acodec': "aac", 'type': "a"},
141: {'ext': ".mp4", 'qi': 3, 'acodec': "aac", 'type': "a"},
256: {'ext': ".mp4", 'qi': 4, 'acodec': "aac", 'type': "a"},
258: {'ext': ".mp4", 'qi': 5, 'acodec': "aac", 'type': "a"},
325: {'ext': ".mp4", 'qi': 6, 'acodec': "dts", 'type': "a"},
328: {'ext': ".mp4", 'qi': 7, 'acodec': "ac3", 'type': "a"},
171: {'ext': ".webm", 'qi': 1, 'acodec': "vorbis", 'type': 'a'},
172: {'ext': ".webm", 'qi': 2, 'acodec': "vorbis", 'type': 'a'},
249: {'ext': ".webm", 'qi': 3, 'acodec': "opus", 'type': 'a'},
250: {'ext': ".webm", 'qi': 4, 'acodec': "opus", 'type': 'a'},
251: {'ext': ".webm", 'qi': 5, 'acodec': "opus", 'type': 'a'}
}
def _decrypt_signature(self, encrypted_sig):
"""Turn the encrypted 's' field into a working signature"""
# try:
# player_url = json.loads(re.search(r'"assets":.+?"js":\s*("[^"]+")',self.data).group(1))
# except (AttributeError, IndexError):
# self.fail(_("Player URL not found"))
player_url = self.player_config['assets']['js']
if player_url.startswith("//"):
player_url = 'https:' + player_url
if not player_url.endswith(".js"):
self.fail(_("Unsupported player type %s") % player_url)
cache_info = self.db.retrieve("cache")
cache_dirty = False
if cache_info is None or 'version' not in cache_info or cache_info[
'version'] != self.__version__:
cache_info = {'version': self.__version__,
'cache': {}}
cache_dirty = True
if player_url in cache_info['cache'] and time.time() < cache_info['cache'][player_url]['time'] + 24 * 60 * 60:
self.log_debug("Using cached decode function to decrypt the URL")
decrypt_func = lambda s: ''.join(s[_i] for _i in cache_info['cache'][player_url]['decrypt_map'])
decrypted_sig = decrypt_func(encrypted_sig)
else:
player_data = self.load(self.fixurl(player_url))
m = re.search(r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(', player_data) or \
re.search(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', player_data)
try:
function_name = m.group('sig')
except (AttributeError, IndexError):
self.fail(_("Signature decode function name not found"))
try:
jsi = JSInterpreter(player_data)
decrypt_func = lambda s: jsi.extract_function(function_name)([s])
#: Since Youtube just scrambles the order of the characters in the signature
#: and does not change any byte value, we can store just a transformation map as a cached function
decrypt_map = [ord(c) for c in decrypt_func(''.join(map(unichr, range(len(encrypted_sig)))))]
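                #: Example: if the scramble turns "0123" into "3102", the stored map is
                #: [3, 1, 0, 2] and the cached call rebuilds any signature of that length
                #: as s[3] + s[1] + s[0] + s[2].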
cache_info['cache'][player_url] = {'decrypt_map': decrypt_map,
'time': time.time()}
cache_dirty = True
decrypted_sig = decrypt_func(encrypted_sig)
except (JSInterpreterError, AssertionError), e:
self.log_error(_("Signature decode failed"), e)
self.fail(e.message)
#: Remove old records from cache
for _k in list(cache_info['cache'].keys()):
if time.time() >= cache_info['cache'][_k]['time'] + 24 * 60 * 60:
cache_info['cache'].pop(_k, None)
cache_dirty = True
if cache_dirty:
self.db.store("cache", cache_info)
return decrypted_sig
def _handle_video(self):
use3d = self.config.get('3d')
if use3d:
quality = {'sd': 82, 'hd': 84, 'fullhd': 85, '240p': 83, '360p': 82, '480p': 82, '720p': 84,
'1080p': 85, '1440p': 85, '2160p': 85, '3072p': 85, '4320p': 85}
else:
quality = {'sd': 18, 'hd': 22, 'fullhd': 37, '240p': 5, '360p': 18, '480p': 35, '720p': 22,
'1080p': 37, '1440p': 264, '2160p': 266, '3072p': 38, '4320p': 272}
desired_fmt = self.config.get('vfmt') or quality.get(self.config.get('quality'), 0)
is_video = lambda x: 'v' in self.formats[x]['type']
if desired_fmt not in self.formats or not is_video(desired_fmt):
self.log_warning(_("VIDEO ITAG %d unknown, using default") % desired_fmt)
desired_fmt = 22
#: Build dictionary of supported itags (3D/2D)
allowed_suffix = lambda x: self.config.get(self.formats[x]['ext'])
video_streams = dict([(_s[0], _s[1:]) for _s in self.streams
if _s[0] in self.formats and allowed_suffix(_s[0]) and
is_video(_s[0]) and self.formats[_s[0]]['3d'] == use3d])
if not video_streams:
self.fail(_("No available video stream meets your preferences"))
self.log_debug("DESIRED VIDEO STREAM: ITAG:%d (%s %dx%d Q:%d 3D:%s) %sfound, %sallowed" %
(desired_fmt, self.formats[desired_fmt]['ext'], self.formats[desired_fmt]['width'],
self.formats[desired_fmt]['height'], self.formats[desired_fmt]['qi'],
self.formats[desired_fmt]['3d'], "" if desired_fmt in video_streams else "NOT ",
"" if allowed_suffix(desired_fmt) else "NOT "))
#: Return fmt nearest to quality index
if desired_fmt in video_streams and allowed_suffix(desired_fmt):
chosen_fmt = desired_fmt
else:
quality_index = lambda x: self.formats[x]['qi'] #: Select quality index
quality_distance = lambda x, y: abs(quality_index(x) - quality_index(y))
self.log_debug("Choosing nearest stream: %s" % [(_s, allowed_suffix(_s), quality_distance(_s, desired_fmt))
for _s in video_streams.keys()])
chosen_fmt = reduce(lambda x, y: x if quality_distance(x, desired_fmt) <= quality_distance(y, desired_fmt)
and quality_index(x) > quality_index(y) else y, video_streams.keys())
self.log_debug("CHOSEN VIDEO STREAM: ITAG:%d (%s %dx%d Q:%d 3D:%s)" %
(chosen_fmt, self.formats[chosen_fmt]['ext'], self.formats[chosen_fmt]['width'],
self.formats[chosen_fmt]['height'], self.formats[chosen_fmt]['qi'],
self.formats[chosen_fmt]['3d']))
url = video_streams[chosen_fmt][0]
if video_streams[chosen_fmt][1]:
if video_streams[chosen_fmt][2]:
signature = self._decrypt_signature(video_streams[chosen_fmt][1])
else:
signature = video_streams[chosen_fmt][1]
url += "&signature=" + signature
if "&ratebypass=" not in url:
url += "&ratebypass=yes"
file_suffix = self.formats[chosen_fmt]['ext'] if chosen_fmt in self.formats else ".flv"
if 'a' not in self.formats[chosen_fmt]['type']:
file_suffix = ".video" + file_suffix
self.pyfile.name = self.file_name + file_suffix
try:
filename = self.download(url, disposition=False)
except Skip, e:
filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
self.pyfile.name)
self.log_info(_("Download skipped: %s due to %s") % (self.pyfile.name, e.message))
return filename, chosen_fmt
def _handle_audio(self, video_fmt):
desired_fmt = self.config.get('afmt') or 141
is_audio = lambda x: self.formats[x]['type'] == "a"
if desired_fmt not in self.formats or not is_audio(desired_fmt):
self.log_warning(_("AUDIO ITAG %d unknown, using default") % desired_fmt)
desired_fmt = 141
#: Build dictionary of supported audio itags
allowed_codec = lambda x: self.config.get(self.formats[x]['acodec'])
allowed_suffix = lambda x: self.config.get(".mkv") or \
self.config.get(self.formats[x]['ext']) and \
self.formats[x]['ext'] == self.formats[video_fmt]['ext']
audio_streams = dict([(_s[0], _s[1:]) for _s in self.streams
if _s[0] in self.formats and is_audio(_s[0]) and
allowed_codec(_s[0]) and allowed_suffix(_s[0])])
if not audio_streams:
self.fail(_("No available audio stream meets your preferences"))
if desired_fmt in audio_streams and allowed_suffix(desired_fmt):
chosen_fmt = desired_fmt
else:
quality_index = lambda x: self.formats[x]['qi'] #: Select quality index
quality_distance = lambda x, y: abs(quality_index(x) - quality_index(y))
self.log_debug("Choosing nearest stream: %s" % [(_s, allowed_suffix(_s), quality_distance(_s, desired_fmt))
for _s in audio_streams.keys()])
chosen_fmt = reduce(lambda x, y: x if quality_distance(x, desired_fmt) <= quality_distance(y, desired_fmt)
and quality_index(x) > quality_index(y) else y, audio_streams.keys())
self.log_debug("CHOSEN AUDIO STREAM: ITAG:%d (%s %s Q:%d)" %
(chosen_fmt, self.formats[chosen_fmt]['ext'], self.formats[chosen_fmt]['acodec'],
self.formats[chosen_fmt]['qi']))
url = audio_streams[chosen_fmt][0]
if audio_streams[chosen_fmt][1]:
if audio_streams[chosen_fmt][2]:
signature = self._decrypt_signature(audio_streams[chosen_fmt][1])
else:
signature = audio_streams[chosen_fmt][1]
url += "&signature=" + signature
if "&ratebypass=" not in url:
url += "&ratebypass=yes"
file_suffix = ".audio" + self.formats[chosen_fmt]['ext'] if chosen_fmt in self.formats else ".m4a"
self.pyfile.name = self.file_name + file_suffix
try:
filename = self.download(url, disposition=False)
except Skip, e:
filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
self.pyfile.name)
self.log_info(_("Download skipped: %s due to %s") % (self.pyfile.name, e.message))
return filename, chosen_fmt
def _handle_subtitles(self):
def timedtext_to_srt(timedtext):
def _format_srt_time(millisec):
sec, milli = divmod(millisec, 1000)
m, s = divmod(int(sec), 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d,%s" % (h, m, s, milli)
i = 1
srt = ""
dom = parse_xml(timedtext)
body = dom.getElementsByTagName("body")[0]
paras = body.getElementsByTagName("p")
for para in paras:
srt += str(i) + "\n"
srt += _format_srt_time(int(para.attributes['t'].value)) + ' --> ' + \
_format_srt_time(int(para.attributes['t'].value) + int(para.attributes['d'].value)) + "\n"
for child in para.childNodes:
if child.nodeName == 'br':
srt += "\n"
elif child.nodeName == '#text':
srt += unicode(child.data)
srt += "\n\n"
i += 1
return srt
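        # Worked example of the conversion above: a timedtext node such as
        # <p t="1000" d="2000">Hi</p> becomes the SRT block
        #   1
        #   00:00:01,0 --> 00:00:03,0
        #   Hi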
        srt_files = []
try:
subs = json.loads(self.player_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
subtitles_urls = dict([(_subtitle['languageCode'],
urllib.unquote(_subtitle['baseUrl']).decode('unicode-escape') + "&fmt=3")
for _subtitle in subs])
self.log_debug("AVAILABLE SUBTITLES: %s" % subtitles_urls.keys() or "None")
except KeyError:
self.log_debug("AVAILABLE SUBTITLES: None")
return srt_files
subs_dl = self.config.get('subs_dl')
if subs_dl != "off":
subs_dl_langs = [_x.strip() for _x in self.config.get('subs_dl_langs', "").split(',') if _x.strip()]
if subs_dl_langs:
# Download only listed subtitles (`subs_dl_langs` config gives the priority)
for _lang in subs_dl_langs:
if _lang in subtitles_urls:
srt_filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
os.path.splitext(self.file_name)[0] + "." + _lang + ".srt")
if self.pyload.config.get('download', 'skip_existing') and \
exists(srt_filename) and os.stat(srt_filename).st_size != 0:
self.log_info("Download skipped: %s due to File exists" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
continue
timed_text = self.load(subtitles_urls[_lang], decode=False)
srt = timedtext_to_srt(timed_text)
with open(srt_filename, "w") as f:
f.write(srt.encode('utf-8'))
self.set_permissions(srt_filename)
self.log_debug("Saved subtitle: %s" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
if subs_dl == "first_available":
break
else:
# Download any available subtitle
for _subtitle in subtitles_urls.items():
srt_filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
os.path.splitext(self.file_name)[0] + "." + _subtitle[0] + ".srt")
if self.pyload.config.get('download', 'skip_existing') and \
exists(srt_filename) and os.stat(srt_filename).st_size != 0:
self.log_info("Download skipped: %s due to File exists" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _subtitle[0]))
continue
timed_text = self.load(_subtitle[1], decode=False)
srt = timedtext_to_srt(timed_text)
with open(srt_filename, "w") as f:
f.write(srt.encode('utf-8'))
self.set_permissions(srt_filename)
self.log_debug("Saved subtitle: %s" % os.path.basename(srt_filename))
                    srt_files.append((srt_filename, _subtitle[0]))
if subs_dl == "first_available":
break
return srt_files
def _postprocess(self, video_filename, audio_filename, subtitles_files):
final_filename = video_filename
subs_embed = self.config.get("subs_embed")
self.pyfile.setCustomStatus("postprocessing")
self.pyfile.setProgress(0)
if self.ffmpeg.found:
if audio_filename is not None:
video_suffix = os.path.splitext(video_filename)[1]
final_filename = os.path.join(os.path.dirname(video_filename),
self.file_name +
(video_suffix if video_suffix == os.path.splitext(audio_filename)[1]
else ".mkv"))
self.ffmpeg.add_stream(('v', video_filename))
self.ffmpeg.add_stream(('a', audio_filename))
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.ffmpeg.add_stream(('s',) + subtitle)
self.ffmpeg.set_start_time(self.start_time)
self.ffmpeg.set_output_filename(final_filename)
self.pyfile.name = os.path.basename(final_filename)
self.pyfile.size = os.path.getsize(video_filename) + \
os.path.getsize(audio_filename) #: Just an estimate
if self.ffmpeg.run():
self.remove(video_filename, trash=False)
self.remove(audio_filename, trash=False)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.remove(subtitle[0])
else:
self.log_warning(_("ffmpeg error"), self.ffmpeg.error_message)
final_filename = video_filename
elif self.start_time[0] != 0 or self.start_time[1] != 0 or subtitles_files and subs_embed:
inputfile = video_filename + "_"
final_filename = video_filename
os.rename(video_filename, inputfile)
self.ffmpeg.add_stream(('v', video_filename))
self.ffmpeg.set_start_time(self.start_time)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.ffmpeg.add_stream(('s', subtitle))
self.pyfile.name = os.path.basename(final_filename)
self.pyfile.size = os.path.getsize(inputfile) #: Just an estimate
if self.ffmpeg.run():
self.remove(inputfile, trash=False)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.remove(subtitle[0])
else:
self.log_warning(_("ffmpeg error"), self.ffmpeg.error_message)
else:
if audio_filename is not None:
self.log_warning("ffmpeg is not installed, video and audio files will not be merged")
if subtitles_files and self.config.get("subs_embed"):
self.log_warning("ffmpeg is not installed, subtitles files will not be embedded")
self.pyfile.setProgress(100)
self.set_permissions(final_filename)
return final_filename
def setup(self):
self.resume_download = True
self.multiDL = True
try:
self.req.http.close()
except Exception:
pass
self.req.http = BIGHTTPRequest(
cookies=CookieJar(None),
options=self.pyload.requestFactory.getOptions(),
limit=2500000)
def process(self, pyfile):
pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
self.data = self.load(pyfile.url)
if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*(?:player-unavailable\s*)?">',
self.data) or '"playabilityStatus":{"status":"ERROR"' in self.data:
self.offline()
if "We have been receiving a large volume of requests from your network." in self.data:
self.temp_offline()
m = re.search(r'ytplayer.config = ({.+?});', self.data)
if m is None:
self.fail(_("Player config pattern not found"))
self.player_config = json.loads(m.group(1))
        self.ffmpeg = Ffmpeg(self.config.get('priority'), self)
#: Set file name
self.file_name = self.player_config['args']['title']
#: Check for start time
self.start_time = (0, 0)
m = re.search(r't=(?:(\d+)m)?(\d+)s', pyfile.url)
if self.ffmpeg and m:
self.start_time = tuple(map(lambda _x: 0 if _x is None else int(_x), m.groups()))
self.file_name += " (starting at %sm%ss)" % (self.start_time[0], self.start_time[1])
#: Cleaning invalid characters from the file name
self.file_name = self.file_name.encode('ascii', 'replace')
for c in self.invalid_chars:
self.file_name = self.file_name.replace(c, '_')
#: Parse available streams
streams_keys = ['url_encoded_fmt_stream_map']
if 'adaptive_fmts' in self.player_config['args']:
streams_keys.append('adaptive_fmts')
self.streams = []
for streams_key in streams_keys:
streams = self.player_config['args'][streams_key]
streams = [_s.split('&') for _s in streams.split(',')]
streams = [dict((_x.split('=', 1)) for _x in _s) for _s in streams]
streams = [(int(_s['itag']),
urllib.unquote(_s['url']),
_s.get('s', _s.get('sig', None)),
True if 's' in _s else False)
for _s in streams]
self.streams += streams
self.log_debug("AVAILABLE STREAMS: %s" % [_s[0] for _s in self.streams])
video_filename, video_itag = self._handle_video()
has_audio = 'a' in self.formats[video_itag]['type']
if not has_audio:
audio_filename, audio_itag = self._handle_audio(video_itag)
else:
audio_filename = None
subtitles_files = self._handle_subtitles()
final_filename = self._postprocess(video_filename,
audio_filename,
subtitles_files)
#: Everything is finished and final name can be set
pyfile.name = os.path.basename(final_filename)
pyfile.size = os.path.getsize(final_filename)
self.last_download = final_filename
"""Credit to this awesome piece of code below goes to the 'youtube_dl' project, kudos!"""
class JSInterpreterError(Exception):
pass
class JSInterpreter(object):
def __init__(self, code, objects=None):
self._OPERATORS = [
('|', operator.or_),
('^', operator.xor),
('&', operator.and_),
('>>', operator.rshift),
('<<', operator.lshift),
('-', operator.sub),
('+', operator.add),
('%', operator.mod),
('/', operator.truediv),
('*', operator.mul),
]
self._ASSIGN_OPERATORS = [(op + '=', opfunc)
for op, opfunc in self._OPERATORS]
self._ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
self._VARNAME_PATTERN = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
if objects is None:
objects = {}
self.code = code
self._functions = {}
self._objects = objects
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
if allow_recursion < 0:
raise JSInterpreterError('Recursion limit reached')
should_abort = False
stmt = stmt.lstrip()
stmt_m = re.match(r'var\s', stmt)
if stmt_m:
expr = stmt[len(stmt_m.group(0)):]
else:
return_m = re.match(r'return(?:\s+|$)', stmt)
if return_m:
expr = stmt[len(return_m.group(0)):]
should_abort = True
else:
# Try interpreting it as an expression
expr = stmt
v = self.interpret_expression(expr, local_vars, allow_recursion)
return v, should_abort
def interpret_expression(self, expr, local_vars, allow_recursion):
expr = expr.strip()
if expr == '': # Empty expression
return None
if expr.startswith('('):
parens_count = 0
for m in re.finditer(r'[()]', expr):
if m.group(0) == '(':
parens_count += 1
else:
parens_count -= 1
if parens_count == 0:
sub_expr = expr[1:m.start()]
sub_result = self.interpret_expression(sub_expr, local_vars, allow_recursion)
remaining_expr = expr[m.end():].strip()
if not remaining_expr:
return sub_result
else:
expr = json.dumps(sub_result) + remaining_expr
break
else:
raise JSInterpreterError('Premature end of parens in %r' % expr)
for op, opfunc in self._ASSIGN_OPERATORS:
m = re.match(r'(?x)(?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?\s*%s(?P<expr>.*)$' %
(self._VARNAME_PATTERN, re.escape(op)), expr)
if m is None:
continue
right_val = self.interpret_expression(m.group('expr'), local_vars, allow_recursion - 1)
if m.groupdict().get('index'):
lvar = local_vars[m.group('out')]
idx = self.interpret_expression(m.group('index'), local_vars, allow_recursion)
assert isinstance(idx, int)
cur = lvar[idx]
val = opfunc(cur, right_val)
lvar[idx] = val
return val
else:
cur = local_vars.get(m.group('out'))
val = opfunc(cur, right_val)
local_vars[m.group('out')] = val
return val
if expr.isdigit():
return int(expr)
var_m = re.match(r'(?!if|return|true|false)(?P<name>%s)$' % self._VARNAME_PATTERN, expr)
if var_m:
return local_vars[var_m.group('name')]
try:
return json.loads(expr)
except ValueError:
pass
m = re.match(r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % self._VARNAME_PATTERN, expr)
if m is not None:
variable = m.group('var')
member = m.group('member')
arg_str = m.group('args')
if variable in local_vars:
obj = local_vars[variable]
else:
if variable not in self._objects:
self._objects[variable] = self.extract_object(variable)
obj = self._objects[variable]
if arg_str is None:
# Member access
if member == 'length':
return len(obj)
return obj[member]
assert expr.endswith(')')
# Function call
if arg_str == '':
argvals = tuple()
else:
argvals = tuple(self.interpret_expression(v, local_vars, allow_recursion) for v in arg_str.split(','))
if member == 'split':
assert argvals == ('',)
return list(obj)
if member == 'join':
assert len(argvals) == 1
return argvals[0].join(obj)
if member == 'reverse':
assert len(argvals) == 0
obj.reverse()
return obj
if member == 'slice':
assert len(argvals) == 1
return obj[argvals[0]:]
if member == 'splice':
assert isinstance(obj, list)
index, howMany = argvals
res = []
for i in range(index, min(index + howMany, len(obj))):
res.append(obj.pop(index))
return res
return obj[member](argvals)
m = re.match(r'(?P<in>%s)\[(?P<idx>.+)\]$' % self._VARNAME_PATTERN, expr)
if m is not None:
val = local_vars[m.group('in')]
idx = self.interpret_expression(m.group('idx'), local_vars, allow_recursion - 1)
return val[idx]
for op, opfunc in self._OPERATORS:
m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
if m is None:
continue
x, abort = self.interpret_statement(m.group('x'), local_vars, allow_recursion - 1)
if abort:
raise JSInterpreterError('Premature left-side return of %s in %r' % (op, expr))
y, abort = self.interpret_statement(m.group('y'), local_vars, allow_recursion - 1)
if abort:
raise JSInterpreterError('Premature right-side return of %s in %r' % (op, expr))
return opfunc(x, y)
m = re.match(r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % self._VARNAME_PATTERN, expr)
if m is not None:
fname = m.group('func')
argvals = tuple(int(v) if v.isdigit() else local_vars[v]
for v in m.group('args').split(','))
if fname not in self._functions:
self._functions[fname] = self.extract_function(fname)
return self._functions[fname](argvals)
raise JSInterpreterError('Unsupported JS expression %r' % expr)
def extract_object(self, objname):
obj = {}
obj_m = re.search(r'(?:var\s+)?%s\s*=\s*\{\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)\}\s*;'
% re.escape(objname), self.code)
fields = obj_m.group('fields')
# Currently, it only supports function definitions
fields_m = re.finditer(r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}', fields)
for f in fields_m:
argnames = f.group('args').split(',')
obj[f.group('key')] = self.build_function(argnames, f.group('code'))
return obj
def extract_function(self, function_name):
func_m = re.search(r'(?x)(?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s*\((?P<args>[^)]*)\)\s*\{(?P<code>[^}]+)\}'
% (re.escape(function_name), re.escape(function_name), re.escape(function_name)), self.code)
if func_m is None:
raise JSInterpreterError('Could not find JS function %r' % function_name)
argnames = func_m.group('args').split(',')
return self.build_function(argnames, func_m.group('code'))
def call_function(self, function_name, *args):
f = self.extract_function(function_name)
return f(args)
def build_function(self, argnames, code):
def resf(argvals):
local_vars = dict(zip(argnames, argvals))
for stmt in code.split(';'):
res, abort = self.interpret_statement(stmt, local_vars)
if abort:
break
return res
return resf
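# Minimal usage sketch of the interpreter above (the JS snippet is a made-up
# stand-in for a real scrambling function, not actual player code):
#
#   jsi = JSInterpreter('function f(a){a=a.split("");a=a.reverse();return a.join("")}')
#   jsi.call_function('f', 'abc')   # -> 'cba'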
| Arno-Nymous/pyload | module/plugins/hoster/YoutubeCom.py | Python | gpl-3.0 | 42,175 | 0.004268 |
import math
# According to Law of cosines, p^2 + p*r + r^2 = c^2.
# Let c = r+k, => r = (p^2-k^2)/(2*k-p) and p > k > p/2.
# Suppose p <= q <= r.
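# Derivation sketch for the r formula above (assuming the 120-degree angles of
# the Torricelli-point configuration this problem is built on):
#   c^2 = p^2 + r^2 - 2*p*r*cos(120) = p^2 + p*r + r^2
#   with c = r + k:  r^2 + 2*r*k + k^2 = p^2 + p*r + r^2
#   =>  r*(2*k - p) = p^2 - k^2  =>  r = (p^2 - k^2) / (2*k - p),
#   which is positive exactly when p/2 < k < p.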
max_sum = 120000
d = {} # p => set(r)
for p in range(1, max_sum/2+1):
if p%10000 == 0:
print p
d[p] = set()
mink = int(p/2)+1
maxk = int((math.sqrt(3)-1)*p) # so that r >= p
for k in range(mink, maxk+1):
if (p**2-k**2)%(2*k-p) == 0:
q = (p**2-k**2)/(2*k-p)
d[p].add(q)
ans = set()
for p in d.keys():
for q in d[p]:
if q in d and len(d[q]) > 0:
for r in d[p].intersection(d[q]):
if p + q + r > max_sum:
continue
ans.add(p+q+r)
print sum(ans)
| renxiaoyi/project_euler | problem_143.py | Python | unlicense | 739 | 0.002706 |
# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org>
# Complete rewrite during flask migration
# Try to provide REST API
#
# privacyIDEA is a fork of LinOTP. Some code is adapted from
# the system-controller from LinOTP, which is
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """The realm endpoints are used to define realms.
A realm groups together many users. Administrators can manage the tokens of
the users in such a realm. Policies and tokens can be assigned to realms.
A realm consists of several resolvers. Thus you can create a realm and gather
users from LDAP and flat file source into one realm or you can pick resolvers
that collect users from different points from your vast LDAP directory and
group these users into a realm.
You will only be able to see and use user object, that are contained in a realm.
The code of this module is tested in tests/test_api_system.py
"""
from flask import (Blueprint,
request, current_app)
from lib.utils import (getParam,
required,
send_result, get_priority_from_param)
from ..lib.log import log_with
from ..lib.realm import get_realms
from ..lib.realm import (set_default_realm,
get_default_realm,
set_realm,
delete_realm)
from ..lib.policy import ACTION
from ..api.lib.prepolicy import prepolicy, check_base_action
from flask import g
from gettext import gettext as _
import logging
log = logging.getLogger(__name__)
realm_blueprint = Blueprint('realm_blueprint', __name__)
defaultrealm_blueprint = Blueprint('defaultrealm_blueprint', __name__)
# ----------------------------------------------------------------
#
# REALM functions
#
#
@log_with(log)
@realm_blueprint.route('/<realm>', methods=['POST'])
@prepolicy(check_base_action, request, ACTION.RESOLVERWRITE)
def set_realm_api(realm=None):
"""
This call creates a new realm or reconfigures a realm.
The realm contains a list of resolvers.
In the result it returns a list of added resolvers and a list of
resolvers, that could not be added.
:param realm: The unique name of the realm
:param resolvers: A comma separated list of unique resolver names or a
list object
:type resolvers: string or list
:param priority: Additional parameters priority.<resolvername> define the
priority of the resolvers within this realm.
:return: a json result with a list of Realms
**Example request**:
To create a new realm "newrealm", that consists of the resolvers
"reso1_with_realm" and "reso2_with_realm" call:
.. sourcecode:: http
POST /realm/newrealm HTTP/1.1
Host: example.com
Accept: application/json
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
resolvers=reso1_with_realm, reso2_with_realm
priority.reso1_with_realm=1
priority.reso2_with_realm=2
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": {
"added": ["reso1_with_realm", "reso2_with_realm"],
"failed": []
}
}
"version": "privacyIDEA unknown"
}
"""
param = request.all_data
resolvers = getParam(param, "resolvers", required)
priority = get_priority_from_param(param)
    if isinstance(resolvers, list):
Resolvers = resolvers
else:
Resolvers = resolvers.split(',')
(added, failed) = set_realm(realm, Resolvers, priority=priority)
g.audit_object.log({'success': len(added) == len(Resolvers),
'info': "realm: %r, resolvers: %r" % (realm,
resolvers)})
return send_result({"added": added,
"failed": failed})
@log_with(log)
@realm_blueprint.route('/', methods=['GET'])
def get_realms_api():
"""
This call returns the list of all defined realms.
It takes no arguments.
:return: a json result with a list of realms
**Example request**:
.. sourcecode:: http
GET / HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": {
"realm1_with_resolver": {
"default": true,
"resolver": [
{
"name": "reso1_with_realm",
"type": "passwdresolver"
}
]
}
}
},
"version": "privacyIDEA unknown"
}
"""
realms = get_realms()
g.audit_object.log({"success": True})
# If the admin is not allowed to see all realms,
# (policy scope=system, action=read)
# the realms, where he has no administrative rights need,
# to be stripped.
'''
polPost = self.Policy.checkPolicyPost('system',
'getRealms',
{'realms': realms})
res = polPost['realms']
'''
return send_result(realms)
@log_with(log)
@realm_blueprint.route('/superuser', methods=['GET'])
def get_super_user_realms():
"""
This call returns the list of all superuser realms
as they are defined in *pi.cfg*.
See :ref:`cfgfile` for more information about this.
:return: a json result with a list of realms
**Example request**:
.. sourcecode:: http
GET /superuser HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": ["superuser",
"realm2"]
           },
"version": "privacyIDEA unknown"
}
"""
superuser_realms = current_app.config.get("SUPERUSER_REALM", [])
g.audit_object.log({"success": True})
return send_result(superuser_realms)
@log_with(log)
@defaultrealm_blueprint.route('/<realm>', methods=['POST'])
@prepolicy(check_base_action, request, ACTION.RESOLVERWRITE)
def set_default_realm_api(realm=None):
"""
This call sets the default realm.
:param realm: the name of the realm, that should be the default realm
:return: a json result with either 1 (success) or 0 (fail)
"""
realm = realm.lower().strip()
r = set_default_realm(realm)
g.audit_object.log({"success": r,
"info": realm})
return send_result(r)
@log_with(log)
@defaultrealm_blueprint.route('', methods=['DELETE'])
@prepolicy(check_base_action, request, ACTION.RESOLVERDELETE)
def delete_default_realm_api(realm=None):
"""
This call deletes the default realm.
:return: a json result with either 1 (success) or 0 (fail)
**Example response**:
.. sourcecode:: http
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": 1
},
"version": "privacyIDEA unknown"
}
"""
r = set_default_realm("")
g.audit_object.log({"success": r,
"info": ""})
return send_result(r)
@log_with(log)
@defaultrealm_blueprint.route('', methods=['GET'])
def get_default_realm_api():
"""
This call returns the default realm
:return: a json description of the default realm with the resolvers
**Example response**:
.. sourcecode:: http
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": {
"defrealm": {
"default": true,
"resolver": [
{
"name": "defresolver",
"type": "passwdresolver"
}
]
}
}
},
"version": "privacyIDEA unknown"
}
"""
res = {}
defRealm = get_default_realm()
if defRealm:
res = get_realms(defRealm)
g.audit_object.log({"success": True,
"info": defRealm})
return send_result(res)
@log_with(log)
#@system_blueprint.route('/delRealm', methods=['POST', 'DELETE'])
@realm_blueprint.route('/<realm>', methods=['DELETE'])
@prepolicy(check_base_action, request, ACTION.RESOLVERDELETE)
def delete_realm_api(realm=None):
"""
This call deletes the given realm.
:param realm: The name of the realm to delete
:return: a json result with value=1 if deleting the realm was successful
**Example request**:
.. sourcecode:: http
DELETE /realm/realm_to_delete HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": 1
},
"version": "privacyIDEA unknown"
}
"""
ret = delete_realm(realm)
g.audit_object.log({"success": ret,
"info": realm})
return send_result(ret)
| woddx/privacyidea | privacyidea/api/realm.py | Python | agpl-3.0 | 10,603 | 0.000283 |
"""Tests for distutils.command.build_py."""
import os
import sys
import StringIO
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
class BuildPyTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_package_data(self):
sources = self.mkdtemp()
f = open(os.path.join(sources, "__init__.py"), "w")
f.write("# Pretend this is a package.")
f.close()
f = open(os.path.join(sources, "README.txt"), "w")
f.write("Info about this package")
f.close()
destination = self.mkdtemp()
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": sources}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
force=0,
build_lib=destination)
dist.packages = ["pkg"]
dist.package_data = {"pkg": ["README.txt"]}
dist.package_dir = {"pkg": sources}
cmd = build_py(dist)
cmd.compile = 1
cmd.ensure_finalized()
self.assertEqual(cmd.package_data, dist.package_data)
cmd.run()
# This makes sure the list of outputs includes byte-compiled
# files for Python modules but not for package data files
# (there shouldn't *be* byte-code files for those!).
#
self.assertEqual(len(cmd.get_outputs()), 3)
pkgdest = os.path.join(destination, "pkg")
files = os.listdir(pkgdest)
self.assert_("__init__.py" in files)
self.assert_("__init__.pyc" in files)
self.assert_("README.txt" in files)
def test_empty_package_dir (self):
# See SF 1668596/1720897.
cwd = os.getcwd()
# create the distribution files.
sources = self.mkdtemp()
open(os.path.join(sources, "__init__.py"), "w").close()
testdir = os.path.join(sources, "doc")
os.mkdir(testdir)
open(os.path.join(testdir, "testfile"), "w").close()
os.chdir(sources)
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
try:
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": ""},
"package_data": {"pkg": ["doc/*"]}})
            # script_name need not exist, it just needs to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data test when package_dir is ''")
finally:
# Restore state.
os.chdir(cwd)
sys.stdout = old_stdout
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = build_py(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/distutils/tests/test_build_py.py | Python | mit | 3,817 | 0.000786 |
# -------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stefan
#
# Created: 11.07.2017
# Copyright: (c) Stefan 2017
# Licence: <your licence>
# -------------------------------------------------------------------------------
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from scrape_interface import ScrapeProcessor
import re
from datetime import datetime
# number of entity for which we download
_ENTITY = 4420465
_titleOverride = {
"2016pv 31 03": "pv 31.03.2016",
"2016PV 27.06. 2016": "PV 27.06.2016",
"2015Pv 20.08": "Pv 20.08.2015",
"2014Pv 22.05": "pv 22.05.2014",
"2014Pv 29.04": "Pv 29.04.2014",
"2014Pv 20.08": "pv 20.08.2014",
"2014Pv 28.07": "Pv 28.07.2014",
"2014PV 30 Septembrie 1": "PV 30.09.2014",
"2014Pv 31.10": "Pv 31.10.2014",
"2014Pv 24.10": "Pv 24.10.2014",
"2014PV 10.12 Sed Indata": "PV 10.12.2014",
"2014PV 6.10": "pv 06.10.2014",
"2014Pv 10.11.": "pv 10.11.2014",
"2014Pv 20.10": "pv 20.10.2014"
}
def extractdata(sp):
print("Start processing entity " + str(_ENTITY))
_process_main(sp, "http://www.primarie3.ro/consiliu-local/hotarari-de-consiliu/", False)
_process_main(sp, "http://www.primarie3.ro/consiliu-local/procese-verbale-de-sedinta/", True)
print("End processing entity " + str(_ENTITY))
# main processing - take all years and process until no more pages
def _process_main(sp, configaddress, ispvpage):
html = ScrapeProcessor.download_page(configaddress)
_process_year(sp, html, ispvpage)
if sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD):
return
soup = BeautifulSoup(html, 'html.parser')
    container = soup.find("div", {"class": "list_buget list_buget_arhiva MB30"})
    if not container:
        print("ERROR: can't find div with class list_buget")
        return
    arhiva = container.find("ul")
    if not arhiva:
        print("ERROR: can't find ul inside div with class list_buget")
        return
for li in arhiva.find_all("li"):
alink = li.find("a")
if not alink.has_attr("href"):
print("ERROR: link was expected to have href")
continue
link = alink["href"]
html2 = ScrapeProcessor.download_page(link)
_process_year(sp, html2, ispvpage)
# process a page with a year from the site
# html = page contents in string
def _process_year(sp, html, ispvpage):
soup = BeautifulSoup(html, 'html.parser')
pagetitle = soup.find("h2", {"class": "MT0"})
if pagetitle is None:
print("ERROR: no H2 title found")
return
match = re.search("(20[0-9]{2})", pagetitle.string)
if not match:
print("ERROR: H2 title was expected to contain a year" + pagetitle.string)
return
year = match.group(1)
lista = soup.find("ul", {"class": "list_buget_p"})
for li in lista.find_all("li"):
alink = li.a
href = alink["href"]
if not href.startswith("http"):
href = urljoin("http://www.primarie3.ro", href)
title = alink.string
if (str(year) + title) in _titleOverride:
title = _titleOverride[str(year) + title]
if ispvpage:
number = 0
else:
match = re.search("hc.*?[ .](\d+)($|\D)", title, re.IGNORECASE)
if not match:
match = re.search("hc.*?[ .](\d+)-(\d+)($|\D)", title, re.IGNORECASE)
if not match:
print("ERROR| Titlul nu incepe cu hc: " + title)
continue
number1 = int(match.group(1))
number2 = int(match.group(2))
if (number2 - number1) < 0 or (number2 - number1) > 10:
print("ERROR|gama invalida: " + title)
continue
for n in range(number1, number2 + 1):
_process_doc(sp, n, year, title, href, "", ispvpage)
return
number = match.group(1)
datetext = ""
if ispvpage:
datetext = ScrapeProcessor.finddate(title)
if datetext == "":
print("ERROR|PV should have a date: " + title)
continue
else:
match = re.search("din (\d+\.\d+\.\d+)", title, re.IGNORECASE)
if match:
datetext = match.group(1)
date = datetime.strptime(datetext, '%d.%m.%Y')
datetext = date.strftime("%Y-%m-%d")
if datetext[:4] != str(year):
print("WARNING| date mismatch " + datetext + " vs year " + str(year))
datetext = ""
# process the found document
code, result = _process_doc(sp, number, year, title, href, datetext, ispvpage)
if code == "ERROR":
print("ERROR|" + title + "|" + result)
# process the info regarding a document (decision)
# decision info should come in docInfo with the following tags:
# date, link, number, year, title
def _process_doc(sp, number, year, title, link, date, ispvpage):
annex = 0
doctype = "MAIN"
#analyse type and post decision
if ispvpage:
number = ScrapeProcessor.dayinyear(date)
code, result = sp.post_decision("PRVB", number, year, _ENTITY, date, title)
if code == "ERROR":
return code, result
decisionid = result
else:
match = re.search("anexa(\d+)", title, re.IGNORECASE)
if match:
annex = match.group(1)
else:
match = re.search("anexa", title, re.IGNORECASE)
if match:
annex = 1
if annex:
code, result = sp.get_decision("HOTA", number, year, _ENTITY)
if code == "ERROR":
return code, result
decisionid = result
doctype = "ANEX"
else:
# add the decision to server
code, result = sp.post_decision("HOTA", number, year, _ENTITY, date, title)
if code == "ERROR":
return code, result
decisionid = result
# download page
code, result = sp.download_file(link)
if code == "ERROR":
sp.post_document(doctype, decisionid, annex, "ERROR_DOWNLOAD", "", link)
return code, result
fname = result
code, result, filetype = sp.ocr_document(fname)
if code == "ERROR":
sp.post_document(doctype, decisionid, annex, "ERROR_OCR", "", link)
return code, result
ocrfname = result
outstr, cssstr = ScrapeProcessor.preparehtml(ocrfname, filetype)
return sp.post_document(doctype, decisionid, annex, outstr, cssstr, link)
if __name__ == '__main__':
localsp = ScrapeProcessor("http://192.168.56.10", "stefan_cioc", "parola1234")
localsp.set_folders("X:/hot/S3I", "X:/hot/S3O")
localsp.set_processmode(ScrapeProcessor.ProcessMode.FULL)
extractdata(localsp)
| stcioc/localdocindex | python/scrape_ps3.py | Python | mit | 7,074 | 0.003675 |
###############################################################################
# Name: ed_statbar.py #
# Purpose: Custom statusbar with builtin progress indicator #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Custom StatusBar for Editra that contains a progress bar that responds to
messages from ed_msg to display progress of different actions.
@summary: Editra's StatusBar class
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: ed_statbar.py 70229 2012-01-01 01:27:10Z CJP $"
__revision__ = "$Revision: 70229 $"
#--------------------------------------------------------------------------#
# Imports
import wx
import wx.stc
# Editra Libraries
import ed_glob
import util
import ed_msg
import ed_menu
from syntax.synglob import GetDescriptionFromId
from eclib import ProgressStatusBar, EncodingDialog
from extern.decorlib import anythread
#--------------------------------------------------------------------------#
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
class EdStatBar(ProgressStatusBar):
"""Custom status bar that handles dynamic field width adjustment and
automatic expiration of status messages.
"""
ID_CLEANUP_TIMER = wx.NewId()
def __init__(self, parent):
super(EdStatBar, self).__init__(parent, style=wx.ST_SIZEGRIP)
# Attributes
self._pid = parent.GetId() # Save parents id for filtering msgs
self._widths = list()
self._cleanup_timer = wx.Timer(self, EdStatBar.ID_CLEANUP_TIMER)
self._eolmenu = wx.Menu()
self._lexmenu = None
self._log = wx.GetApp().GetLog()
# Setup
self.SetFieldsCount(6) # Info, vi stuff, line/progress
self.SetStatusWidths([-1, 90, 40, 40, 40, 155])
self._eolmenu.Append(ed_glob.ID_EOL_MAC, u"CR",
_("Change line endings to %s") % u"CR",
kind=wx.ITEM_CHECK)
self._eolmenu.Append(ed_glob.ID_EOL_WIN, u"CRLF",
_("Change line endings to %s") % u"CRLF",
kind=wx.ITEM_CHECK)
self._eolmenu.Append(ed_glob.ID_EOL_UNIX, u"LF",
_("Change line endings to %s") % u"LF",
kind=wx.ITEM_CHECK)
# Event Handlers
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy, self)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_TIMER, self.OnExpireMessage,
id=EdStatBar.ID_CLEANUP_TIMER)
# Messages
ed_msg.Subscribe(self.OnProgress, ed_msg.EDMSG_PROGRESS_SHOW)
ed_msg.Subscribe(self.OnProgress, ed_msg.EDMSG_PROGRESS_STATE)
ed_msg.Subscribe(self.OnUpdateText, ed_msg.EDMSG_UI_SB_TXT)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_UI_NB_CHANGED)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_FILE_SAVED)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_FILE_OPENED)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_UI_STC_LEXER)
def OnDestroy(self, evt):
"""Unsubscribe from messages"""
if self._lexmenu:
self._lexmenu.Destroy()
if self._eolmenu:
self._eolmenu.Destroy()
if evt.GetId() == self.GetId():
ed_msg.Unsubscribe(self.OnProgress)
ed_msg.Unsubscribe(self.OnUpdateText)
ed_msg.Unsubscribe(self.OnUpdateDoc)
evt.Skip()
def __SetStatusText(self, txt, field):
"""Safe method to use for setting status text with CallAfter.
@param txt: string
@param field: int
"""
try:
super(EdStatBar, self).SetStatusText(txt, field)
self.AdjustFieldWidths()
if field == ed_glob.SB_INFO and txt != u'':
# Start the expiration countdown
if self._cleanup_timer.IsRunning():
self._cleanup_timer.Stop()
self._cleanup_timer.Start(10000, True)
        except (wx.PyDeadObjectError, wx.PyAssertionError):
# Getting some odd assertion errors on wxMac so just trap
# and ignore them for now
# glyphCount == (text.length()+1)" failed at graphics.cpp(2048)
# in GetPartialTextExtents()
pass
except TypeError, err:
self._log("[edstatbar][err] Bad status message: %s" % str(txt))
self._log("[edstatbar][err] %s" % err)
def AdjustFieldWidths(self):
"""Adjust each field width of status bar basing on the field text
@return: None
"""
widths = [-1]
# Calculate required widths
# NOTE: Order of fields is important
for field in [ed_glob.SB_BUFF,
ed_glob.SB_LEXER,
ed_glob.SB_ENCODING,
ed_glob.SB_EOL,
ed_glob.SB_ROWCOL]:
width = self.GetTextExtent(self.GetStatusText(field))[0] + 20
if width == 20:
width = 0
widths.append(width)
# Adjust widths
if widths[-1] < 155:
widths[-1] = 155
# Only update if there are changes
if widths != self._widths:
self._widths = widths
self.SetStatusWidths(self._widths)
def GetMainWindow(self):
"""Method required for L{ed_msg.mwcontext}"""
return self.TopLevelParent
def OnExpireMessage(self, evt):
"""Handle Expiring the status message when the oneshot timer
tells us it has expired.
"""
if evt.GetId() == EdStatBar.ID_CLEANUP_TIMER:
wx.CallAfter(self.__SetStatusText, u'', ed_glob.SB_INFO)
else:
evt.Skip()
def OnLeftDClick(self, evt):
"""Handlers mouse left double click on status bar
@param evt: wx.MouseEvent
@note: Assumes parent is MainWindow instance
"""
pt = evt.GetPosition()
if self.GetFieldRect(ed_glob.SB_ROWCOL).Contains(pt):
mw = self.GetParent()
mpane = mw.GetEditPane()
mpane.ShowCommandControl(ed_glob.ID_GOTO_LINE)
else:
evt.Skip()
def OnLeftUp(self, evt):
"""Handle left clicks on the status bar
@param evt: wx.MouseEvent
"""
pt = evt.GetPosition()
if self.GetFieldRect(ed_glob.SB_EOL).Contains(pt):
rect = self.GetFieldRect(ed_glob.SB_EOL)
self.PopupMenu(self._eolmenu, (rect.x, rect.y))
elif self.GetFieldRect(ed_glob.SB_ENCODING).Contains(pt):
nb = self.GetTopLevelParent().GetNotebook()
buff = nb.GetCurrentCtrl()
dlg = EncodingDialog(nb,
msg=_("Change the encoding of the current document."),
title=_("Change Encoding"),
default=buff.GetEncoding())
bmp = wx.ArtProvider.GetBitmap(str(ed_glob.ID_DOCPROP),
wx.ART_OTHER)
if bmp.IsOk():
dlg.SetBitmap(bmp)
dlg.CenterOnParent()
# TODO: should add EdFile callbacks for modification events instead
# of using explicit statusbar refresh.
if dlg.ShowModal() == wx.ID_OK:
buff.SetEncoding(dlg.GetEncoding())
self.UpdateFields()
# NOTE: Got an error report about a PyDeadObject error here. The
# error does not make any sense since the dialog is not
# destroyed or deleted by anything before this. Add validity
# check to ensure reference is still valid.
if dlg:
dlg.Destroy()
elif self.GetFieldRect(ed_glob.SB_LEXER).Contains(pt):
# Change Lexer popup menu
if self._lexmenu:
self._lexmenu.Destroy()
self._lexmenu = wx.Menu()
ed_menu.EdMenuBar.PopulateLexerMenu(self._lexmenu)
rect = self.GetFieldRect(ed_glob.SB_LEXER)
self.PopupMenu(self._lexmenu, (rect.x, rect.y))
else:
evt.Skip()
def OnProgress(self, msg):
"""Set the progress bar's state
@param msg: Message Object
"""
mdata = msg.GetData()
# Don't do anything if the message is not for this frame
if self._pid != mdata[0]:
return
mtype = msg.GetType()
if mtype == ed_msg.EDMSG_PROGRESS_STATE:
# May be called from non gui thread so don't do anything with
# the gui here.
self.SetProgress(mdata[1])
self.range = mdata[2]
if sum(mdata[1:]) == 0:
self.Stop()
elif mtype == ed_msg.EDMSG_PROGRESS_SHOW:
if mdata[1]:
self.Start(75)
else:
# TODO: findout where stray stop event is coming from...
self.Stop()
@ed_msg.mwcontext
def OnUpdateDoc(self, msg):
"""Update document related fields
@param msg: Message Object
"""
self.UpdateFields()
if msg.GetType() == ed_msg.EDMSG_UI_NB_CHANGED:
wx.CallAfter(self.__SetStatusText, u'', ed_glob.SB_INFO)
@anythread
def DoUpdateText(self, msg):
"""Thread safe update of status text. Proxy for OnUpdateText because
pubsub seems to have issues with passing decorator methods for
listeners.
@param msg: Message Object
"""
# Only process if this status bar is in the active window and shown
parent = self.GetTopLevelParent()
if (parent.IsActive() or wx.GetApp().GetTopWindow() == parent):
field, txt = msg.GetData()
self.UpdateFields()
wx.CallAfter(self.__SetStatusText, txt, field)
def OnUpdateText(self, msg):
"""Update the status bar text based on the received message
@param msg: Message Object
"""
self.DoUpdateText(msg)
def PushStatusText(self, txt, field):
"""Set the status text
@param txt: Text to put in bar
@param field: int
"""
wx.CallAfter(self.__SetStatusText, txt, field)
def SetStatusText(self, txt, field):
"""Set the status text
@param txt: Text to put in bar
@param field: int
"""
wx.CallAfter(self.__SetStatusText, txt, field)
def UpdateFields(self):
"""Update document fields based on the currently selected
document in the editor.
@postcondition: encoding and lexer fields are updated
@todo: update when readonly hooks are implemented
"""
nb = self.GetParent().GetNotebook()
if nb is None:
return
try:
cbuff = nb.GetCurrentCtrl()
doc = cbuff.GetDocument()
wx.CallAfter(self.__SetStatusText, doc.GetEncoding(),
ed_glob.SB_ENCODING)
wx.CallAfter(self.__SetStatusText,
GetDescriptionFromId(cbuff.GetLangId()),
ed_glob.SB_LEXER)
eol = { wx.stc.STC_EOL_CR : u"CR",
wx.stc.STC_EOL_LF : u"LF",
wx.stc.STC_EOL_CRLF : u"CRLF" }
wx.CallAfter(self.__SetStatusText,
eol[cbuff.GetEOLMode()],
ed_glob.SB_EOL)
except wx.PyDeadObjectError:
# May be called asyncronasly after the control is already dead
return
| garrettcap/Bulletproof-Backup | wx/tools/Editra/src/ed_statbar.py | Python | gpl-2.0 | 12,020 | 0.001165 |
from datetime import datetime, date
import pytest
from pytz import UTC
from uber.config import c
from uber.models import Attendee, Session
from uber.site_sections import summary
@pytest.fixture
def birthdays():
dates = [
date(1964, 12, 30),
date(1964, 12, 31),
date(1964, 1, 1),
date(1964, 1, 2),
date(1964, 1, 9),
date(1964, 1, 10),
date(1964, 1, 11),
date(1964, 1, 12),
date(1964, 1, 30),
date(1964, 1, 31),
date(1964, 2, 1),
date(1964, 2, 2),
date(1964, 2, 27),
date(1964, 2, 28),
date(1964, 2, 29),
date(1964, 3, 1),
date(1964, 3, 2)]
attendees = []
for d in dates:
attendees.append(Attendee(
placeholder=True,
first_name='Born on',
last_name=d.strftime('%B %-d, %Y'),
ribbon=c.VOLUNTEER_RIBBON,
staffing=True,
birthdate=d))
ids = []
with Session() as session:
session.bulk_insert(attendees)
ids = [a.id for a in attendees]
yield ids
with Session() as session:
session.query(Attendee).filter(Attendee.id.in_(ids)).delete(
synchronize_session=False)
class TestBirthdayCalendar(object):
@pytest.mark.parametrize('year', [None, 2027, 2028])
def test_attendee_birthday_calendar(
self,
admin_attendee,
year,
birthdays,
monkeypatch):
if year:
assert str(year)
response = summary.Root().attendee_birthday_calendar(year=year)
else:
assert str(datetime.now(UTC).year)
response = summary.Root().attendee_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
lines = response.strip().split('\n')
assert len(lines) == (17 + 1) # Extra line for the header
@pytest.mark.parametrize('epoch,eschaton,expected', [
(datetime(2018, 1, 10), datetime(2018, 1, 11), 2), # Normal dates
(datetime(2017, 12, 31), datetime(2018, 1, 1), 2), # Crossing the year
(datetime(2018, 1, 31), datetime(2018, 2, 1), 2), # Crossing the month
(datetime(2018, 2, 28), datetime(2018, 3, 1), 3), # Leap day
(datetime(2018, 1, 1), datetime(2018, 3, 4), 15), # Multi-month
(datetime(2017, 12, 28), datetime(2018, 3, 4), 17), # Everybody
])
def test_event_birthday_calendar(
self,
admin_attendee,
epoch,
eschaton,
expected,
birthdays,
monkeypatch):
monkeypatch.setattr(c, 'EPOCH', epoch)
monkeypatch.setattr(c, 'ESCHATON', eschaton)
response = summary.Root().event_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
lines = response.strip().split('\n')
assert len(lines) == (expected + 1) # Extra line for the header
def test_event_birthday_calendar_correct_birthday_years(
self,
admin_attendee,
birthdays,
monkeypatch):
monkeypatch.setattr(c, 'EPOCH', datetime(2017, 12, 31))
monkeypatch.setattr(c, 'ESCHATON', datetime(2018, 1, 1))
response = summary.Root().event_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
assert '"Born on December 31, 1964\'s Birthday",2017-12-31' in response
assert '"Born on January 1, 1964\'s Birthday",2018-01-01' in response
lines = response.strip().split('\n')
assert len(lines) == (2 + 1) # Extra line for the header
| magfest/ubersystem | tests/uber/site_sections/test_summary.py | Python | agpl-3.0 | 3,732 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0039_remove_permission_groups'),
]
operations = [
migrations.AlterField(
model_name='customerpermission',
name='is_active',
field=models.NullBooleanField(default=True, db_index=True),
),
migrations.AlterField(
model_name='projectpermission',
name='is_active',
field=models.NullBooleanField(default=True, db_index=True),
),
]
| opennode/nodeconductor | waldur_core/structure/migrations/0040_make_is_active_nullable.py | Python | mit | 634 | 0 |
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import scipy.signal
import shutil
import display_pyutils
def apply_averaging_filter(x, filter_size=5):
return np.convolve(x, np.ones(filter_size,) / float(filter_size), mode='valid')
def apply_median_filter(x, filter_size=5):
return scipy.signal.medfilt(x, filter_size)
def postprocess_signal(anomaly_ratings):
signal = anomaly_ratings / (1 - anomaly_ratings)
bottom_ninetyfive_percent = sorted(signal)[:int(np.floor(len(signal) * 0.95))]
smoothed_signal = apply_averaging_filter(signal, 100)
    # Scalar cutoff (assumed): median plus two standard deviations of the lower 95%.
    threshold = float(np.median(signal) + 2 * np.std(bottom_ninetyfive_percent))
return smoothed_signal, threshold
def save_anomaly_plot(signal, pars):
plt.figure(1); plt.clf()
plot_anomaly_ratings(signal)
title = 'video: {}\nlambda: {}\nmax_buffer_size:{}'.format(
os.path.basename(pars.paths.files.infile_features), pars.algorithm.discriminability.lambd,
pars.algorithm.discriminability.max_buffer_size)
plt.title(title)
print('Saving figure to {}.png in workspace'.format(plt.gcf().number))
display_pyutils.save_fig_to_workspace()
def plot_anomaly_ratings(signal):
plt.fill_between(range(len(signal)), signal, facecolor=display_pyutils.GOOD_COLOR_CYCLE[0],
alpha=1.0) # alpha=0.5
signal_sorted = np.sort(signal)
bottom_ninetyfive_percent = signal_sorted[:int(np.floor(len(signal_sorted) * 0.95))]
y_max = np.median(bottom_ninetyfive_percent) + 3*np.std(bottom_ninetyfive_percent)
plt.ylim([0, y_max])
# Given :
# - a set of anomaly ratings (continuous plus threshold or binary -- start with binary)
# - path to frames of a video
# - path to destination frames
# Output :
# - populate path to destination frames w/ video that highlights the anomaly frames (in red) /
# slows them down and speeds up non-anomalies.
def create_output_frames(anomaly_rating_binary_per_frame, input_frames, output_dir,
normal_fps=30*4, anomalous_fps=15):
    # Minimal sketch: copy frames into output_dir, tagging anomalous ones so a later
    # step can highlight or re-time them (normal_fps/anomalous_fps are unused here).
    for is_anomalous, frame in zip(anomaly_rating_binary_per_frame, input_frames):
        prefix = 'anomaly_' if is_anomalous else ''
        shutil.copy(frame, os.path.join(output_dir, prefix + os.path.basename(frame)))
def main():
LOCAL_SED_VIDEO_DIR = '/home/allie/projects/aladdin/videos/'
results_dirs = glob.glob('/home/allie/workspace/server_sync/2017_09_14/*')
for results_dir in results_dirs:
pars = pickle.load(open(os.path.join(results_dir, 'pars.pickle'), 'rb'))
an = np.load(results_dir + '/anomaly_ratings.npy')
signal, threshold = postprocess_signal(an)
save_anomaly_plot(signal, pars)
videoname = pars.paths.files.infile_features
        # Frame location/naming ('image-*.png') and the output directory are assumed.
        input_frames = sorted(glob.glob(os.path.join(
            LOCAL_SED_VIDEO_DIR, videoname, 'frames', 'image-*.png')))
        output_dir = os.path.join(results_dir, 'output_frames')
        create_output_frames(signal > threshold, input_frames, output_dir)
| alliedel/anomalyframework_python | anomalyframework/results.py | Python | mit | 2,930 | 0.024232 |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import json
import os
import time
import urllib
from tempest.common import glance_http
from tempest.common import rest_client
from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ImageClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
super(ImageClientJSON, self).__init__(
auth_provider,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type)
self._http = None
def _image_meta_from_headers(self, headers):
meta = {'properties': {}}
for key, value in headers.iteritems():
if key.startswith('x-image-meta-property-'):
_key = key[22:]
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = key[13:]
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
'1')
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key])
except ValueError:
pass
return meta
def _image_meta_to_headers(self, fields):
headers = {}
fields_copy = copy.deepcopy(fields)
copy_from = fields_copy.pop('copy_from', None)
if copy_from is not None:
headers['x-glance-api-copy-from'] = copy_from
for key, value in fields_copy.pop('properties', {}).iteritems():
headers['x-image-meta-property-%s' % key] = str(value)
for key, value in fields_copy.pop('api', {}).iteritems():
headers['x-glance-api-property-%s' % key] = str(value)
for key, value in fields_copy.iteritems():
headers['x-image-meta-%s' % key] = str(value)
return headers
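
    # Illustrative mapping (hypothetical values): _image_meta_to_headers turns fields like
    #   {'name': 'cirros', 'is_public': True, 'properties': {'arch': 'x86_64'}}
    # into the Glance v1 request headers
    #   {'x-image-meta-name': 'cirros', 'x-image-meta-is_public': 'True',
    #    'x-image-meta-property-arch': 'x86_64'}
    # while _image_meta_from_headers performs the reverse translation.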
def _get_file_size(self, obj):
"""Analyze file-like object and attempt to determine its size.
:param obj: file-like object, typically redirected from stdin.
:retval The file's size or None if it cannot be determined.
"""
# For large images, we need to supply the size of the
# image file. See LP Bugs #827660 and #845788.
if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
try:
obj.seek(0, os.SEEK_END)
obj_size = obj.tell()
obj.seek(0)
return obj_size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the user is trying
# to pipe image data to the client, e.g.
# echo testdata | bin/glance add blah..., or
# that stdin is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return None
else:
raise
else:
# Cannot determine size of input image
return None
def _get_http(self):
dscv = CONF.identity.disable_ssl_certificate_validation
ca_certs = CONF.identity.ca_certificates_file
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=dscv, ca_certs=ca_certs)
def _create_with_data(self, headers, data):
resp, body_iter = self.http.raw_request('POST', '/v1/images',
headers=headers, body=data)
self._error_checker('POST', '/v1/images', headers, data, resp,
body_iter)
body = json.loads(''.join([c for c in body_iter]))
return resp, body['image']
def _update_with_data(self, image_id, headers, data):
url = '/v1/images/%s' % image_id
resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
body=data)
self._error_checker('PUT', url, headers, data,
resp, body_iter)
body = json.loads(''.join([c for c in body_iter]))
return resp, body['image']
@property
def http(self):
if self._http is None:
if CONF.service_available.glance:
self._http = self._get_http()
return self._http
def create_image(self, name, container_format, disk_format, **kwargs):
params = {
"name": name,
"container_format": container_format,
"disk_format": disk_format,
}
headers = {}
for option in ['is_public', 'location', 'properties',
'copy_from', 'min_ram']:
if option in kwargs:
params[option] = kwargs.get(option)
headers.update(self._image_meta_to_headers(params))
if 'data' in kwargs:
return self._create_with_data(headers, kwargs.get('data'))
resp, body = self.post('v1/images', None, headers)
self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body['image']
def update_image(self, image_id, name=None, container_format=None,
data=None, properties=None):
params = {}
headers = {}
if name is not None:
params['name'] = name
if container_format is not None:
params['container_format'] = container_format
if properties is not None:
params['properties'] = properties
headers.update(self._image_meta_to_headers(params))
if data is not None:
return self._update_with_data(image_id, headers, data)
url = 'v1/images/%s' % image_id
resp, body = self.put(url, data, headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['image']
def delete_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return resp, body
def image_list(self, **kwargs):
url = 'v1/images'
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['images']
def image_list_detail(self, properties=dict(), changes_since=None,
**kwargs):
url = 'v1/images/detail'
params = {}
for key, value in properties.items():
params['property-%s' % key] = value
kwargs.update(params)
if changes_since is not None:
kwargs['changes-since'] = changes_since
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['images']
def get_image_meta(self, image_id):
url = 'v1/images/%s' % image_id
resp, __ = self.head(url)
self.expected_success(200, resp.status)
body = self._image_meta_from_headers(resp)
return resp, body
def get_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return resp, body
def is_resource_deleted(self, id):
try:
self.get_image_meta(id)
except exceptions.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image_meta'
def get_image_membership(self, image_id):
url = 'v1/images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def get_shared_images(self, member_id):
url = 'v1/shared-images/%s' % member_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def add_member(self, member_id, image_id, can_share=False):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
body = None
if can_share:
body = json.dumps({'member': {'can_share': True}})
resp, __ = self.put(url, body)
self.expected_success(204, resp.status)
return resp
def delete_member(self, member_id, image_id):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
self.expected_success(204, resp.status)
return resp
# NOTE(afazekas): just for the wait function
def _get_image_status(self, image_id):
resp, meta = self.get_image_meta(image_id)
status = meta['status']
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_image_status(self, image_id, status):
"""Waits for a Image to reach a given status."""
start_time = time.time()
old_value = value = self._get_image_status(image_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
LOG.info('Value transition from "%s" to "%s"'
                         ' in %d second(s).', old_value,
value, dtime)
if value == status:
return value
if value == 'killed':
raise exceptions.ImageKilledException(image_id=image_id,
status=status)
if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
'while waiting for %s, '
'but we got %s.' %
(self.build_timeout, status, value))
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_image_status(image_id)
| afaheem88/tempest_neutron | tempest/services/image/v1/json/image_client.py | Python | apache-2.0 | 11,416 | 0 |
"""
Functional Data Analysis Routines
"""
from __future__ import division
import numpy as np
def _curve_area(A, B):
r1 = np.mean(A-B)
r2 = np.mean(B-A)
if r1 > r2:
return r1
else:
return r2
def curve_test(Y, cnd_1, cnd_2, n_perm=1000):
"""
Assess whether two curves are statistically significant based on
permutation test over conditions and replicates.
Parameters
----------
Y: 2d array, shape: (time x var)
Observations matrix for each variable over time.
cnd_1: list, shape: (n_reps_1)
List of replicate indices in columns of Y for condition 1
cnd_2: list, shape: (n_reps_2)
List of replicate indices in columns of Y for condition 2
n_perm: int
Number of permutations to group
Returns
-------
p: int
Two-tailed p-value
"""
n_reps_1 = len(cnd_1)
n_reps_2 = len(cnd_2)
n_reps = Y.shape[1]
assert n_reps == (n_reps_1 + n_reps_2)
# Get true area between condition curves
Y_1 = np.mean(Y[:, cnd_1], axis=1)
Y_2 = np.mean(Y[:, cnd_2], axis=1)
true_area = _curve_area(Y_1, Y_2)
# Estimate null distribution of area between curves
p_count = 0
for pr in xrange(n_perm):
rnd_reps = np.random.permutation(n_reps)
rnd_cnd_1 = rnd_reps[:n_reps_1]
rnd_cnd_2 = rnd_reps[n_reps_1:]
rnd_Y_1 = np.mean(Y[:, rnd_cnd_1], axis=1)
rnd_Y_2 = np.mean(Y[:, rnd_cnd_2], axis=1)
rnd_area = _curve_area(rnd_Y_1, rnd_Y_2)
if rnd_area > true_area:
p_count += 1
p = p_count / n_perm
return p
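
# Illustrative usage sketch with synthetic data; the matrix shape, replicate
# indices and the noise model below are arbitrary choices for demonstration.
if __name__ == '__main__':
    np.random.seed(0)
    Y = np.random.randn(50, 6)
    Y[:, 3:] += 1.0  # shift the replicates of condition 2 upward
    p_val = curve_test(Y, cnd_1=[0, 1, 2], cnd_2=[3, 4, 5], n_perm=500)
    print('two-tailed p-value: %.3f' % p_val)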
| akhambhati/Echobase | Echobase/Statistics/FDA/fda.py | Python | gpl-3.0 | 1,634 | 0 |
""" pyvalence
"""
__version__ = '0.0.1.3'
| blakeboswell/valence | pyvalence/__init__.py | Python | bsd-3-clause | 44 | 0 |
#!/usr/bin/python
def conversionMap():
""" returns conversionmap """
return {
'a': [1, -2, -1],
'b': [1, 2, 1],
'c': [1, 2, -1],
'd': [1, -2, 1],
'e': [-1, 1, 1],
'f': [1, -2, 2],
'g': [-2, 1, 2],
'h': [-2, -1, 2],
'i': [-1, -1, 1],
'j': [2, 1, 2],
'k': [2, -1, 2],
'l': [-1, 1, 2],
'm': [-1, 2, 1],
'n': [-1, -2, 1],
'o': [-1, -1, 2],
'p': [-2, -1, -2],
'q': [-2, 2, -1],
'r': [-2, 1, -2],
's': [-2, -1, 1],
't': [-2, 2, 1],
'u': [2, 1, -2],
'v': [-1, -2, -1],
'w': [-1, -2, 2],
'x': [2, -1, 1],
'y': [2, -1, -2],
'z': [-2, 1, 1],
'char_empty': [0, 2, -2, 0],
'char_eol': [0, 2, -2, 2, -2, 0],
}
def convertCharacter(c, m):
""" c = character to convert, m = conversionMap """
return m[c]
def convertCharacters(s, m):
""" s = string, m = conversionMap """
o = []
e = []
for c in s:
if c == ' ':
c = 'char_empty'
elif c == '.':
c = 'char_eol'
if c in m:
o += m[c]
else:
e.append(c)
if len(e) > 0:
return {'e': True, 'l': e}
else:
return {'e': False, 'l': o}
def addBaseLines(a):
""" a = array to add baselines to """
o = []
p = None
for c in a:
        if p is not None:
            if ((p - 1) == int(c) or (p) == int(c) or (p + 1) == int(c)) and (p != 0) and (c != 0):
                o.append(0)
        p = int(c)
        o.append(int(c))
return o
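
# Illustrative examples (arbitrary inputs): a baseline 0 is inserted between
# consecutive non-zero values that differ by at most 1, e.g.
#   addBaseLines([1, -2, -1]) -> [1, -2, 0, -1]
#   addBaseLines([1, 2, 1])   -> [1, 0, 2, 0, 1]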
def main(prefix = False, string = '', suffix = False):
print 'Input:'
print 'var_prefix: ' + str(prefix)
print 'var_string: ' + str(string)
print 'var_suffix: ' + str(suffix)
if string == '':
print 'No string entered.'
print 'Application will now exit.'
return
o = []
if prefix:
string = ' ' + string
if suffix:
string = string + '.'
o = convertCharacters(string, conversionMap())
print ''
print 'Output:'
if o['e']:
print 'The string could not be parsed because of the following characters:'
print o['l']
else:
o = addBaseLines(o['l'])
print o
return
if __name__ == "__main__":
cin = raw_input("Enter string: ")
main(
prefix = False,
string = cin,
suffix = False,
)
| theysconator/Scribbler | scribbler.py | Python | lgpl-3.0 | 2,173 | 0.02485 |
from tgbot import plugintest
from plugin_examples.guess import GuessPlugin
class GuessPluginTest(plugintest.PluginTestCase):
def setUp(self):
self.plugin = GuessPlugin()
self.bot = self.fake_bot('', plugins=[self.plugin])
def test_play(self):
self.receive_message('/guess_start')
self.assertReplied("I'm going to think of a number between 0 and 9 and you have to guess it! What's your guess?")
number = self.plugin.read_data(1)
self.assertIsNotNone(number)
self.assertGreaterEqual(number, 0)
self.assertLessEqual(number, 9)
# force number for testing
self.plugin.save_data(1, obj=5)
self.receive_message('1')
self.assertReplied("I'm thinking higher...")
self.receive_message('6')
self.assertReplied("I'm thinking lower...")
self.receive_message('gief error')
self.assertReplied('Invalid guess!')
self.receive_message('5')
self.assertReplied('Congratz, you nailed it John')
def test_stop(self):
self.receive_message('/guess_start')
self.assertReplied("I'm going to think of a number between 0 and 9 and you have to guess it! What's your guess?")
self.assertIsNotNone(self.plugin.read_data(1))
self.receive_message('/guess_stop')
self.assertReplied('Ok :(')
self.assertIsNone(self.plugin.read_data(1))
def test_stop_on_group(self):
chat = {
'id': -1,
'type': 'group',
'title': 'Test'
}
self.receive_message('/guess_start', chat=chat)
self.assertReplied("I'm going to think of a number between 0 and 9 and you have to guess it! What's your guess?")
self.assertIsNotNone(self.plugin.read_data(-1))
self.receive_message('/guess_stop', chat=chat)
self.assertReplied('Ok :(')
self.assertIsNone(self.plugin.read_data(-1))
| fopina/tgbotplug | tests/examples/test_guess.py | Python | mit | 1,935 | 0.00155 |
"""
Compatibility module.
This module contains duplicated code from Python itself or 3rd party
extensions, which may be included for the following reasons:
* compatibility
* we may only need a small subset of the copied library/module
"""
import _inspect
import py3k
from _inspect import getargspec, formatargspec
from py3k import *
__all__ = []
__all__.extend(_inspect.__all__)
__all__.extend(py3k.__all__)
| beiko-lab/gengis | bin/Lib/site-packages/numpy/compat/__init__.py | Python | gpl-3.0 | 434 | 0 |
# -*- coding: utf-8 -*-
import hook
import bnetprotocol
from misc import *
from config import config
#settings = config[__name__.split('.')[-1]]
def message_received(bn, d):
if d.event == bnetprotocol.EID_TALK:
msg_list = str(d.message).split(' ', 1)
try:
command, payload = msg_list
except ValueError:
command = msg_list[0]
payload = ''
if command == '.join' and len(payload) > 0:
'''if str(d.message).split(' ')[0] == settings['trigger'] + 'join':'''
bn.send_packet(bnetprotocol.SEND_SID_CHATCOMMAND('/join %s' % (payload)))
def install():
hook.register('after-handle_sid_chatevent', message_received)
def uninstall():
hook.unregister('after-handle_sid_chatevent', message_received)
| w3gh/ghost.py | plugins/join.py | Python | mit | 716 | 0.023743 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.config import Config
from thumbor.importer import Importer
class BaseMaxAgeFilterTestCase(TestCase):
def get_fixture_path(self, name):
return "./tests/fixtures/%s" % name
def get_config(self):
return Config.load(self.get_fixture_path("max_age_conf.py"))
def get_importer(self):
importer = Importer(self.config)
importer.import_modules()
return importer
class MaxAgeFilterTestCase(BaseMaxAgeFilterTestCase):
@gen_test
async def test_max_age_filter_with_regular_image(self):
response = await self.async_fetch("/unsafe/smart/image.jpg", method="GET")
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=2,public")
expect(response.headers).to_include("Expires")
@gen_test
async def test_max_age_url(self):
response = await self.async_fetch(
"/unsafe/filters:max_age(30)/image.jpg", method="GET"
)
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=30,public")
expect(response.headers).to_include("Expires")
class MaxAgeDetectorFilterTestCase(BaseMaxAgeFilterTestCase):
def get_config(self):
config = super(MaxAgeDetectorFilterTestCase, self).get_config()
config.DETECTORS = ["tests.fixtures.prevent_result_storage_detector"]
return config
@gen_test
async def test_max_age_filter_with_non_storaged_image(self):
response = await self.async_fetch("/unsafe/smart/image.jpg", method="GET")
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=1,public")
expect(response.headers).to_include("Expires")
class MaxAgeErrorDectectorFilterTestCase(BaseMaxAgeFilterTestCase):
def get_config(self):
config = super(MaxAgeErrorDectectorFilterTestCase, self).get_config()
config.DETECTORS = ["tests.fixtures.detection_error_detector"]
return config
@gen_test
async def test_with_detection_error_image(self):
response = await self.async_fetch("/unsafe/smart/image.jpg", method="GET")
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=1,public")
expect(response.headers).to_include("Expires")
| gi11es/thumbor | tests/filters/test_max_age.py | Python | mit | 2,711 | 0.001107 |
# -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Predicates structure for the BigML local AnomalyTree
This module defines an auxiliary Predicates structure that is used in the
AnomalyTree to save the node's predicates info.
"""
from bigml.predicate import Predicate
class Predicates():
"""A list of predicates to be evaluated in an anomaly tree's node.
"""
def __init__(self, predicates_list):
self.predicates = []
for predicate in predicates_list:
if predicate is True:
self.predicates.append(True)
else:
self.predicates.append(
Predicate(predicate.get('op'),
predicate.get('field'),
predicate.get('value'),
predicate.get('term')))
def to_rule(self, fields, label='name'):
""" Builds rule string from a predicates list
"""
return " and ".join([predicate.to_rule(fields, label=label) for
predicate in self.predicates
if not isinstance(predicate, bool)])
def apply(self, input_data, fields):
""" Applies the operators defined in each of the predicates to
the provided input data
"""
return all([predicate.apply(input_data, fields) for
predicate in self.predicates
if isinstance(predicate, Predicate)])
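
# Illustrative sketch (hypothetical field ids, names and values; the outputs shown
# only indicate the expected shape of the results):
#
#   preds = Predicates([True,
#                       {'op': '>', 'field': '000002', 'value': 3},
#                       {'op': '=', 'field': '000001', 'value': 'red'}])
#   fields = {'000002': {'name': 'petal length'}, '000001': {'name': 'color'}}
#   preds.to_rule(fields)            # e.g. "petal length > 3 and color = red"
#   preds.apply(input_data, fields)  # True only if every predicate holds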
| jaor/python | bigml/predicates.py | Python | apache-2.0 | 2,025 | 0.000494 |
from functools import reduce
from operator import or_
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.http import JsonResponse
from cities_light.models import City, Country, Region
from dal import autocomplete
from pytz import country_timezones
from mozillians.common.templatetags.helpers import get_object_or_none
from mozillians.groups.models import GroupMembership
from mozillians.phonebook.forms import get_timezones_list
from mozillians.users.models import IdpProfile, UserProfile
class BaseProfileAdminAutocomplete(autocomplete.Select2QuerySetView):
"""Base class for django-autocomplete-light."""
def get_queryset(self):
"""Base queryset used only in admin.
Return all the users who have completed their profile registration.
"""
if not self.request.user.is_staff:
return UserProfile.objects.none()
qs = UserProfile.objects.complete()
self.q_base_filter = (Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class UsersAdminAutocomplete(autocomplete.Select2QuerySetView):
"""Base class for django-autocomplete-light."""
def get_queryset(self):
"""Base queryset used only in admin.
Return all the users who have completed their profile registration.
"""
if not self.request.user.is_staff:
return User.objects.none()
qs = User.objects.all()
self.q_base_filter = (Q(userprofile__full_name__icontains=self.q)
| Q(email__icontains=self.q)
| Q(username__icontains=self.q))
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class VoucherAutocomplete(BaseProfileAdminAutocomplete):
def get_queryset(self):
"""Augment base queryset by returning only users who can vouch."""
qs = super(VoucherAutocomplete, self).get_queryset().filter(can_vouch=True)
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class VouchedAutocomplete(BaseProfileAdminAutocomplete):
def get_queryset(self):
"""Augment base queryset by returning only vouched users."""
qs = super(VouchedAutocomplete, self).get_queryset().vouched()
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class CuratorsAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""Augment base queryset by returning only vouched users."""
# Allow only vouched users to perform this query.
if not self.request.user.userprofile.is_vouched:
return UserProfile.objects.none()
qs = UserProfile.objects.vouched()
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
def get_autocomplete_location_query(qs, q):
"""Return qs if ``istartswith`` filter exists, else fallback to ``icontains``."""
startswith_qs = qs.filter(name__istartswith=q)
if startswith_qs.exists():
return startswith_qs
return qs.filter(name__icontains=q)
class StaffProfilesAutocomplete(autocomplete.Select2QuerySetView):
def get_results(self, context):
"""Modify the text in the results of the group invitation form."""
results = []
for result in context['object_list']:
pk = self.get_result_value(result)
if not pk:
continue
profile = UserProfile.objects.get(pk=pk)
idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
text = self.get_result_label(result)
# Append the email used for login in the autocomplete text
if idp:
text += ' ({0})'.format(idp.email)
item = {
'id': pk,
'text': text
}
results.append(item)
return results
def get_queryset(self):
if not self.request.user.userprofile.is_vouched:
return UserProfile.objects.none()
queries = []
# Query staff profiles
for domain in settings.AUTO_VOUCH_DOMAINS:
pks = IdpProfile.objects.filter(
email__endswith='@' + domain).values_list('profile__pk', flat=True)
queries.append(Q(pk__in=pks))
query = reduce(or_, queries)
qs = UserProfile.objects.filter(query).distinct()
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
class AccessGroupInvitationAutocomplete(StaffProfilesAutocomplete):
def get_queryset(self):
staff_qs = super(AccessGroupInvitationAutocomplete, self).get_queryset()
staff_ids = staff_qs.values_list('pk', flat=True)
# Query NDA memberships
nda_members_ids = (
GroupMembership.objects.filter(Q(group__name=settings.NDA_GROUP)
| Q(group__name=settings.NDA_STAFF_GROUP))
.filter(status=GroupMembership.MEMBER).distinct()
.values_list('userprofile__pk', flat=True)
)
query = Q(pk__in=staff_ids) | Q(pk__in=nda_members_ids)
qs = UserProfile.objects.filter(query).distinct()
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
class NDAGroupInvitationAutocomplete(StaffProfilesAutocomplete):
def get_queryset(self):
staff_qs = super(NDAGroupInvitationAutocomplete, self).get_queryset()
staff_ids = staff_qs.values_list('pk', flat=True)
mfa_idps_query = (IdpProfile.objects.filter(primary=True)
.filter(Q(type=IdpProfile.PROVIDER_GITHUB)
| Q(type=IdpProfile.PROVIDER_FIREFOX_ACCOUNTS)
| Q(type=IdpProfile.PROVIDER_GOOGLE)
| Q(type=IdpProfile.PROVIDER_LDAP)))
mfa_idps_pks = mfa_idps_query.values_list('profile__id', flat=True)
qs = UserProfile.objects.filter(Q(pk__in=mfa_idps_pks) | Q(pk__in=staff_ids))
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
class CountryAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""Country queryset from cities_light."""
if not self.request.user.is_authenticated():
return Country.objects.none()
qs = Country.objects.all()
if self.q:
return get_autocomplete_location_query(qs, self.q)
return qs
class RegionAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""Region queryset from cities_light."""
country_id = self.forwarded.get('country')
if not self.request.user.is_authenticated():
return Region.objects.none()
qs = Region.objects.all()
if country_id:
country = Country.objects.get(id=country_id)
qs = qs.filter(country=country)
if self.q:
return get_autocomplete_location_query(qs, self.q)
return qs
class CityAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""City queryset from cities_light."""
region_id = self.forwarded.get('region')
country_id = self.forwarded.get('country')
if not self.request.user.is_authenticated():
return City.objects.none()
qs = City.objects.all()
if country_id:
country = Country.objects.get(id=country_id)
qs = qs.filter(country=country)
if region_id:
region = Region.objects.get(id=region_id)
qs = qs.filter(region=region, country=region.country)
if self.q:
return get_autocomplete_location_query(qs, self.q)
return qs
class TimeZoneAutocomplete(autocomplete.Select2ListView):
def get_list(self):
"""Timezone list provided from pytz."""
if not self.request.user.is_authenticated():
return []
return get_timezones_list()
def get(self, request, *args, **kwargs):
"""Override get method to tune the search."""
results = self.get_list()
country_id = self.forwarded.get('country')
region_id = self.forwarded.get('region')
city_id = self.forwarded.get('city')
country_code = None
# Try to get the timezone from the city, region, country
# forwarded values
if city_id:
city = City.objects.get(id=city_id)
country_code = city.country.code2
elif region_id:
region = Region.objects.get(id=region_id)
country_code = region.country.code2
elif country_id:
country = Country.objects.get(id=country_id)
country_code = country.code2
if country_code:
results = country_timezones(country_code)
if self.q:
results = [item for item in results if self.q.lower() in item.lower()]
return JsonResponse({
'results': [dict(id=x, text=x) for x in results]
})
| akatsoulas/mozillians | mozillians/users/views.py | Python | bsd-3-clause | 9,979 | 0.001203 |
# Daniel Fernandez Rodriguez <danielfr@cern.ch>
from argparse import ArgumentParser
from collections import defaultdict
from requests_kerberos import HTTPKerberosAuth
import json
import requests
import subprocess
import logging
import sys
class PuppetDBNodes(object):
def __init__(self, args):
for k, v in args.items():
setattr(self, k, v)
def negociate_krb_ticket(self, keytab_path, username):
kinit = '/usr/bin/kinit'
kinit_args = [kinit, '-kt', keytab_path, username]
kinit = subprocess.Popen(kinit_args)
kinit.wait()
def destroy_krb_ticket(self):
subprocess.call(["kdestroy"])
def get_facts_puppetdb(self, apiurl, facts, hostgroup):
url ='%s/facts' % apiurl
query_base = '["and",["or",%s],["in", "certname", ["extract", "certname", ["select-facts", ["and", ["=", "name", "hostgroup"], ["~", "value", "%s"]]]]]]'
query_facts = ','.join(['["=","name","%s"]' % fact for fact in facts])
query = query_base % (query_facts, hostgroup)
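        # Example of the rendered query string (hypothetical inputs
        # facts=['kernel'], hostgroup='playground'), wrapped here for readability:
        #   ["and",["or",["=","name","kernel"]],
        #    ["in", "certname", ["extract", "certname", ["select-facts",
        #     ["and", ["=", "name", "hostgroup"], ["~", "value", "playground"]]]]]]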
headers = {'Content-Type': 'application/json','Accept': 'application/json, version=2'}
payload = {'query': query}
logging.info("Getting facts from '%s', query: '%s'" % (url, query))
r = requests.get(url, params=payload, headers=headers, auth=HTTPKerberosAuth())
if r.status_code == requests.codes.ok:
logging.info("Request code: '%s'" % r.status_code)
return json.loads(r.text)
else:
logging.error("The request failed with code '%s'" % r.status_code)
return None
def print_puppetdb_nodes(self, apiurl, hostgroup, factlist):
'''
        Queries PuppetDB and prints out the nodes information in a supported format for Rundeck.
'''
factlist.extend(["operatingsystem", "operatingsystemrelease", "hostgroup"])
raw_data = self.get_facts_puppetdb(apiurl, factlist, hostgroup)
data = defaultdict(lambda: {})
if raw_data != None:
for entry in raw_data:
data[entry['certname']] = dict(data[entry['certname']].items() + [(entry['name'], entry['value'])])
logging.info("Printing node list using standard output...")
for node in data.keys():
print ('%s:'%node)
print (" "*4 + "hostname: " + node)
print (" "*4 + "username: root")
for fact in factlist:
if data[node].has_key(fact):
print (" "*4 + fact + ": " + data[node][fact] )
logging.info("Node list printed successfully")
else:
logging.error("Fact list empty. Check PuppetDB connection params")
def store_puppetdb_nodes(self, apiurl, hostgroup, factlist, filename):
'''
Instead of querying PuppetDB every time, saves the list of nodes on a local file
        so Rundeck can access it locally.
'''
factlist.extend(["operatingsystem", "operatingsystemrelease", "hostgroup"])
raw_data = self.get_facts_puppetdb(apiurl, factlist, hostgroup)
data = defaultdict(lambda: {})
if raw_data != None:
for entry in raw_data:
data[entry['certname']] = dict(data[entry['certname']].items() + [(entry['name'], entry['value'])])
logging.info("Saving node list in '%s'..." % filename)
with open(filename, 'w') as file:
for node in data.keys():
file.write('%s:\n'%node)
file.write(" "*4 + "hostname: " + node + '\n')
file.write(" "*4 + "username: root" + '\n')
for fact in factlist:
if data[node].has_key(fact):
file.write(" "*4 + fact + ": " + data[node][fact] + '\n')
logging.info("Node list saved successfully")
else:
logging.error("Fact list empty. Check PuppetDB connection params")
def run(self):
self.negociate_krb_ticket(self.keytab, self.username)
if self.store:
self.store_puppetdb_nodes(self.apiurl, self.hostgroup, self.factlist, self.file)
else:
self.print_puppetdb_nodes(self.apiurl, self.hostgroup, self.factlist)
def main():
parser = ArgumentParser(description="Populate Rundeck list of nodes from PuppetDB")
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
parser.add_argument("-d", "--debug", help="increase output to debug messages", action="store_true")
parser.add_argument("--apiurl", help="PuppetDB API url (https://<SERVER>:<PORT>/<API VERSION>)", required=True)
parser.add_argument("--hostgroup", help="Foreman hostgroup", required=True)
parser.add_argument("--keytab", help="Keytab", required=True)
parser.add_argument("--username", help="Username to connect to PuppetDB", required=True)
parser.add_argument("--factlist", nargs='*', default=[], help="List of facts to retrieve for every node")
parser.add_argument("--file", default="/tmp/nodes.yaml", help="File path where the node list info will be stored")
behaviour = parser.add_mutually_exclusive_group()
behaviour.add_argument('--store', action='store_true')
behaviour.add_argument('--print', action='store_false')
args = parser.parse_args()
    # trick to get the factlist as a proper list when called from Rundeck
if len(args.factlist) == 1:
args.factlist = args.factlist[0].split()
if args.verbose:
logging.basicConfig(level=logging.INFO)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
plugin = PuppetDBNodes(args.__dict__)
plugin.run()
if __name__ == "__main__":
try:
main()
except Exception, e:
logging.error(e)
sys.exit(-1)
| ak0ska/rundeck-puppetdb-nodes | rundeck-puppetdb-nodes-plugin/contents/rundeck_puppetdb_nodes.py | Python | apache-2.0 | 5,881 | 0.006802 |
from django.contrib import admin
from core.models import Language
# Register your models here.
class LanguageAdmin(admin.ModelAdmin):
model = Language
fieldsets = [
('', {'fields': ['name', 'locale']})
]
list_display = ['name', 'locale']
search_fields = ['name', 'locale']
ordering = ('name',)
admin.site.register(Language, LanguageAdmin) | ekaradon/demihi | core/admin.py | Python | mit | 348 | 0.028736 |
from csacompendium.research.models import ExperimentUnit
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import ExperimentUnitListFilter
from csacompendium.research.api.experimentunit.experimentunitserializers import experiment_unit_serializers
def experiment_unit_views():
"""
Experiment unit views
:return: All experiment unit views
:rtype: Object
"""
experiment_unit_serializer = experiment_unit_serializers()
class ExperimentUnitCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = ExperimentUnit.objects.all()
serializer_class = experiment_unit_serializer['ExperimentUnitDetailSerializer']
permission_classes = [IsAuthenticated]
class ExperimentUnitListAPIView(ListAPIView):
"""
        API list view. Gets all records.
"""
queryset = ExperimentUnit.objects.all()
serializer_class = experiment_unit_serializer['ExperimentUnitListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = ExperimentUnitListFilter
pagination_class = APILimitOffsetPagination
class ExperimentUnitDetailAPIView(DetailViewUpdateDelete):
"""
Updates a record.
"""
queryset = ExperimentUnit.objects.all()
serializer_class = experiment_unit_serializer['ExperimentUnitDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'slug'
return {
'ExperimentUnitListAPIView': ExperimentUnitListAPIView,
'ExperimentUnitDetailAPIView': ExperimentUnitDetailAPIView,
'ExperimentUnitCreateAPIView': ExperimentUnitCreateAPIView
}
| nkoech/csacompendium | csacompendium/research/api/experimentunit/experimentunitviews.py | Python | mit | 2,054 | 0.003408 |
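# A minimal wiring sketch for the experiment_unit_views() factory above. The urls.py
# shown here is hypothetical: URL regexes and names are illustrative; only the view
# class keys come from the factory's return value.
from django.conf.urls import url
from csacompendium.research.api.experimentunit.experimentunitviews import experiment_unit_views

views = experiment_unit_views()

urlpatterns = [
    url(r'^$', views['ExperimentUnitListAPIView'].as_view(), name='experiment_unit_list'),
    url(r'^create/$', views['ExperimentUnitCreateAPIView'].as_view(), name='experiment_unit_create'),
    url(r'^(?P<slug>[\w-]+)/$', views['ExperimentUnitDetailAPIView'].as_view(), name='experiment_unit_detail'),
]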
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "game_server.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| cypreess/PyrateDice | game_server/game_server/manage.py | Python | mit | 254 | 0 |
#! /usr/bin/env python
from PyFoam.Applications.ChangePython import changePython
changePython("pvpython","PVSnapshot",options=["--mesa"])
| Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam | bin/pyFoamPVSnapshotMesa.py | Python | gpl-2.0 | 140 | 0.014286 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import mock
from st2common.models.db.rule_enforcement import RuleEnforcementDB
from st2common.persistence.rule_enforcement import RuleEnforcement
from st2common.transport.publishers import PoolPublisher
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2tests import DbTestCase
SKIP_DELETE = False
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class RuleEnforcementModelTest(DbTestCase):
def test_ruleenforcment_crud(self):
saved = RuleEnforcementModelTest._create_save_rule_enforcement()
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(saved.rule.ref, retrieved.rule.ref,
'Same rule enforcement was not returned.')
self.assertTrue(retrieved.enforced_at is not None)
# test update
RULE_ID = str(bson.ObjectId())
self.assertEqual(retrieved.rule.id, None)
retrieved.rule.id = RULE_ID
saved = RuleEnforcement.add_or_update(retrieved)
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(retrieved.rule.id, RULE_ID,
'Update to rule enforcement failed.')
# cleanup
RuleEnforcementModelTest._delete([retrieved])
try:
retrieved = RuleEnforcement.get_by_id(saved.id)
except StackStormDBObjectNotFoundError:
retrieved = None
self.assertIsNone(retrieved, 'managed to retrieve after delete.')
@staticmethod
def _create_save_rule_enforcement():
created = RuleEnforcementDB(trigger_instance_id=str(bson.ObjectId()),
rule={'ref': 'foo_pack.foo_rule',
'uid': 'rule:foo_pack:foo_rule'},
execution_id=str(bson.ObjectId()))
return RuleEnforcement.add_or_update(created)
@staticmethod
def _delete(model_objects):
global SKIP_DELETE
if SKIP_DELETE:
return
for model_object in model_objects:
model_object.delete()
| punalpatel/st2 | st2common/tests/unit/test_db_rule_enforcement.py | Python | apache-2.0 | 2,863 | 0 |
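# A hedged note on running the DB test above: it relies on StackStorm's DbTestCase,
# which needs a configured test database; the runner invocation below is an assumption
# (st2 historically used nose), not taken from this file.
#
#   nosetests -v st2common/tests/unit/test_db_rule_enforcement.py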
#!/usr/bin/python
# -*- Coding:utf-8 -*-
#
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Jan Safranek <jsafrane@redhat.com>
from test_base import StorageTestBase, short_tests_only
import unittest
import pywbem
MEGABYTE = 1024 * 1024
class TestCreateLV(StorageTestBase):
"""
Test CreateOrModifyLV method.
"""
VG_CLASS = "LMI_VGStoragePool"
STYLE_GPT = 3
PARTITION_CLASS = "LMI_GenericDiskPartition"
def setUp(self):
""" Find storage service. """
super(TestCreateLV, self).setUp()
self.service = self.wbemconnection.EnumerateInstanceNames(
"LMI_StorageConfigurationService")[0]
self.part_service = self.wbemconnection.EnumerateInstanceNames(
"LMI_DiskPartitionConfigurationService")[0]
vgname = self._create_vg()
self.vg = self.wbemconnection.GetInstance(vgname)
self.lvcaps_name = self.wbemconnection.AssociatorNames(vgname,
AssocClass="LMI_LVElementCapabilities")[0]
def tearDown(self):
self._destroy_vg(self.vg.path)
super(TestCreateLV, self).tearDown()
def _create_vg(self):
"""
Create a partition and Volume Group on it and return its
CIMInstanceName.
"""
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
ElementName='tstName')
self.assertEqual(ret, 0)
return outparams['pool']
def _destroy_vg(self, vgname):
""" Destroy VG and its partition. """
self.wbemconnection.DeleteInstance(vgname)
def test_create_no_pool(self):
""" Test CreateOrModifyLV without InPool."""
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
Size=pywbem.Uint64(40 * MEGABYTE))
def test_create_no_size(self):
""" Test CreateOrModifyLV without Size."""
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path)
def test_create_wrong_size(self):
""" Test CreateOrModifyLV with wrong Size."""
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(0))
# TODO: test this:
# self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
# "CreateOrModifyLV",
# self.service,
# InPool=self.vg.path,
# Size=pywbem.Uint64(self.vg['TotalManagedSpace'] * 10))
def test_create_missing_goal(self):
""" Test CreateOrModifyLV with missing Goal."""
goal_name = pywbem.CIMInstanceName(
classname="LMI_LVStorageSetting",
keybindings={
"InstanceID": "LMI:LMI_LVStorageSetting:not-existing"
})
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(40 * MEGABYTE),
Goal=goal_name)
def _create_setting(self):
"""
Create new LMI_LVStorageSetting with default values and return
its CIMInstance.
"""
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateLVStorageSetting",
self.lvcaps_name)
self.assertEqual(ret, 0)
setting_name = outparams['setting']
setting = self.wbemconnection.GetInstance(setting_name)
return setting
def test_create_wrong_goal(self):
""" Test CreateOrModifyLV with wrong Goal."""
setting = self._create_setting()
setting['ExtentStripeLengthMin'] = pywbem.Uint16(100)
self.wbemconnection.ModifyInstance(setting)
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(40 * MEGABYTE),
Goal=setting.path)
self.wbemconnection.DeleteInstance(setting.path)
def test_create_no_goal(self):
""" Test CreateOrModifyLV without any Goal."""
(retval, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(10 * self.vg['ExtentSize']))
self.assertEqual(retval, 0)
self.assertEqual(len(outparams), 2)
self.assertEqual(outparams['Size'], 10 * self.vg['ExtentSize'])
lv_name = outparams['theelement']
lv = self.wbemconnection.GetInstance(lv_name)
vg_setting = self.wbemconnection.Associators(self.vg.path,
AssocClass="LMI_VGElementSettingData")[0]
lv_setting = self.wbemconnection.Associators(lv_name,
AssocClass="LMI_LVElementSettingData")[0]
self.assertEqual(
lv['BlockSize'] * lv['NumberOfBlocks'],
10 * self.vg['ExtentSize'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
lv_setting['NoSinglePointOfFailure'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
vg_setting['NoSinglePointOfFailure'])
self.assertEqual(
lv['DataRedundancy'],
lv_setting['DataRedundancyGoal'])
self.assertEqual(
lv['DataRedundancy'],
vg_setting['DataRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
lv_setting['PackageRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
vg_setting['PackageRedundancyGoal'])
self.assertEqual(
lv['ExtentStripeLength'],
lv_setting['ExtentStripeLength'])
self.assertEqual(
lv['ExtentStripeLength'],
vg_setting['ExtentStripeLength'])
# check vg is reduced
new_vg = self.wbemconnection.GetInstance(self.vg.path)
self.assertEqual(
new_vg['RemainingExtents'],
self.vg['RemainingExtents'] - 10)
self.assertEqual(
new_vg['RemainingManagedSpace'],
self.vg['RemainingManagedSpace'] - 10 * self.vg['ExtentSize'])
self.wbemconnection.DeleteInstance(lv_name)
def test_create_goal_name(self):
""" Test CreateOrModifyLV with a Goal and elementname."""
goal = self._create_setting()
(retval, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(10 * self.vg['ExtentSize']),
Goal=goal.path,
ElementName="tstNAME")
self.assertEqual(retval, 0)
self.assertEqual(len(outparams), 2)
self.assertEqual(outparams['Size'], 10 * self.vg['ExtentSize'])
lv_name = outparams['theelement']
lv = self.wbemconnection.GetInstance(lv_name)
lv_setting = self.wbemconnection.Associators(lv_name,
AssocClass="LMI_LVElementSettingData")[0]
self.assertEqual(lv['ElementName'], "tstNAME")
self.assertEqual(
lv['BlockSize'] * lv['NumberOfBlocks'],
10 * self.vg['ExtentSize'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
lv_setting['NoSinglePointOfFailure'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
goal['NoSinglePointOfFailure'])
self.assertEqual(
lv['DataRedundancy'],
lv_setting['DataRedundancyGoal'])
self.assertEqual(
lv['DataRedundancy'],
goal['DataRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
lv_setting['PackageRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
goal['PackageRedundancyGoal'])
self.assertEqual(
lv['ExtentStripeLength'],
lv_setting['ExtentStripeLength'])
self.assertEqual(
lv['ExtentStripeLength'],
goal['ExtentStripeLength'])
# check vg is reduced
new_vg = self.wbemconnection.GetInstance(self.vg.path)
self.assertEqual(
new_vg['RemainingExtents'],
self.vg['RemainingExtents'] - 10)
self.assertEqual(
new_vg['RemainingManagedSpace'],
self.vg['RemainingManagedSpace'] - 10 * self.vg['ExtentSize'])
self.wbemconnection.DeleteInstance(goal.path)
self.wbemconnection.DeleteInstance(lv_name)
@unittest.skipIf(short_tests_only(), reason="Skipping long tests.")
def test_create_10(self):
""" Test CreateOrModifyLV 10x."""
lvs = []
for i in range(10):
(retval, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(2 * self.vg['ExtentSize']),
)
self.assertEqual(retval, 0)
self.assertEqual(len(outparams), 2)
self.assertEqual(outparams['Size'], 2 * self.vg['ExtentSize'])
lv_name = outparams['theelement']
lv = self.wbemconnection.GetInstance(lv_name)
lv_setting = self.wbemconnection.Associators(lv_name,
AssocClass="LMI_LVElementSettingData")[0]
lvs.append(lv)
self.assertEqual(
lv['BlockSize'] * lv['NumberOfBlocks'],
2 * self.vg['ExtentSize'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
lv_setting['NoSinglePointOfFailure'])
self.assertEqual(
lv['DataRedundancy'],
lv_setting['DataRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
lv_setting['PackageRedundancyGoal'])
self.assertEqual(
lv['ExtentStripeLength'],
lv_setting['ExtentStripeLength'])
# check vg is reduced
new_vg = self.wbemconnection.GetInstance(self.vg.path)
self.assertEqual(
new_vg['RemainingExtents'],
self.vg['RemainingExtents'] - (i + 1) * 2)
self.assertEqual(
new_vg['RemainingManagedSpace'],
self.vg['RemainingManagedSpace'] - (i + 1) * 2 * self.vg['ExtentSize'])
for lv in lvs:
self.wbemconnection.DeleteInstance(lv.path)
if __name__ == '__main__':
unittest.main()
| jsafrane/openlmi-storage | test/test_create_lv.py | Python | lgpl-2.1 | 12,008 | 0.002498 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
'MTSSL 298K 2011': {
'topology': "rotamer1_R1A_298K_2011.pdb",
'ensemble': "rotamer1_R1A_298K_2011.dcd",
'populations': "R1A_298K_populations_2011.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
},
'MTSSL 298K 2015': {
'topology': "rotamer1_R1A_298K_2015.pdb",
'ensemble': "rotamer1_R1A_298K_2015.dcd",
'populations': "R1A_298K_populations_2015.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
'information': "updated version of the MTSSL rotamer library from 2015"
},
}
def find_file(filename, pkglibdir=LIBDIR):
"""Return full path to file *filename*.
1) If the *filename* exists, return rooted canonical path.
2) Otherwise, create a path to file in the installed *pkglibdir*.
.. note::
A file name is *always* returned, even if the file does not
exist (because this is how :func:`pkg_resources.resource_filename`
works).
"""
if os.path.exists(filename):
return MDAnalysis.lib.util.realpath(filename)
return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename))
class RotamerLibrary(object):
"""Rotamer library
    The library makes available the attributes :attr:`rotamers` and :attr:`weights`.
.. attribute:: rotamers
:class:`MDAnalysis.core.AtomGroup.Universe` instance that
records all rotamers as a trajectory
.. attribute:: weights
       NumPy array containing the population of each rotamer.
.. attribute:: name
Name of the library.
.. attribute:: lib
Dictionary containing the file names and meta data for the library :attr:`name`.
"""
def __init__(self, name):
"""RotamerLibrary(name)
:Arguments:
*name*
name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
"""
self.name = name
self.lib = {}
try:
self.lib.update(LIBRARIES[name]) # make a copy
except KeyError:
raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
LIBRARIES.keys()))
logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
logger.info("Please cite: {0[citation]}".format(self.lib))
# adjust paths
for k in 'ensemble', 'topology', 'populations':
self.lib[k] = find_file(self.lib[k])
logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
self.weights = self.read_rotamer_weights(self.lib['populations'])
if len(self.rotamers.trajectory) != len(self.weights):
err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
len(self.rotamers.trajectory), len(self.weights))
logger.critical(err_msg)
raise ValueError(err_msg)
def read_rotamer_weights(self, filename):
"""read in the rotamer weights from *filename*
There is one weight per conformer (frame) in the trajectory.
"""
return np.loadtxt(filename)
def __repr__(self):
return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
len(self.weights))
| MDAnalysis/RotamerConvolveMD | rotcon/library.py | Python | gpl-2.0 | 5,002 | 0.003798 |
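# A short usage sketch for the RotamerLibrary class above (assumes the package is
# installed so this module imports as rotcon.library; the library name comes from LIBRARIES):
from rotcon.library import LIBRARIES, RotamerLibrary

print(sorted(LIBRARIES))                  # available rotamer library names
lib = RotamerLibrary('MTSSL 298K 2011')   # loads topology, ensemble and populations
print(lib)                                # repr shows author and number of rotamers
print(len(lib.weights), len(lib.rotamers.trajectory))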
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of cpe package.
This module of is an implementation of name matching
algorithm in accordance with version 2.2 of CPE (Common Platform
Enumeration) specification.
Copyright (C) 2013 Alejandro Galindo García, Roberto Abdelkader Martínez Pérez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For any problems using the cpe package, or general questions and
feedback about it, please contact:
- Alejandro Galindo García: galindo.garcia.alejandro@gmail.com
- Roberto Abdelkader Martínez Pérez: robertomartinezp@gmail.com
"""
from .cpe import CPE
from .cpeset import CPESet
class CPESet2_2(CPESet):
"""
Represents a set of CPE Names.
This class allows:
        - create a set of CPE Names.
        - match a CPE Name against a set of CPE Names.
"""
###############
# CONSTANTS #
###############
#: Version of CPE set
VERSION = "2.2"
####################
# OBJECT METHODS #
####################
def append(self, cpe):
"""
        Adds a CPE Name to the set if it is not already present.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
TEST:
>>> from .cpeset2_2 import CPESet2_2
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/h:hp'
>>> c1 = CPE2_2(uri1)
>>> s = CPESet2_2()
>>> s.append(c1)
"""
if cpe.VERSION != CPE.VERSION_2_2:
errmsg = "CPE Name version {0} not valid, version 2.2 expected".format(
cpe.VERSION)
raise ValueError(errmsg)
for k in self.K:
if cpe.cpe_str == k.cpe_str:
return None
self.K.append(cpe)
def name_match(self, cpe):
"""
Accepts a set of known instances of CPE Names and a candidate CPE Name,
and returns 'True' if the candidate can be shown to be
an instance based on the content of the known instances.
Otherwise, it returns 'False'.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
:param CPE cpe: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
TEST: matching with ANY values explicit
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/o:microsoft:windows:vista'
>>> uri2 = 'cpe:/o:cisco:ios:12.3:enterprise'
>>> c1 = CPE2_2(uri1)
>>> c2 = CPE2_2(uri2)
>>> s = CPESet2_2()
>>> s.append(c1)
>>> s.append(c2)
>>> uri3 = 'cpe:/o:microsoft::vista'
>>> c3 = CPE2_2(uri3)
>>> s.name_match(c3)
True
"""
return super(CPESet2_2, self).name_match(cpe)
if __name__ == "__main__":
import doctest
doctest.testmod()
doctest.testfile("tests/testfile_cpeset2_2.txt")
| nilp0inter/cpe | cpe/cpeset2_2.py | Python | lgpl-3.0 | 3,512 | 0.000571 |
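# A usage sketch mirroring the doctests above (module paths follow the cpe package
# layout, e.g. cpe/cpeset2_2.py):
from cpe.cpe2_2 import CPE2_2
from cpe.cpeset2_2 import CPESet2_2

known = CPESet2_2()
known.append(CPE2_2('cpe:/o:microsoft:windows:vista'))
known.append(CPE2_2('cpe:/o:cisco:ios:12.3:enterprise'))

candidate = CPE2_2('cpe:/o:microsoft::vista')
print(known.name_match(candidate))  # True, per the doctest for name_match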
## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2007-2009 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007 SURFnet. http://www.surfnet.nl
# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2012, 2017 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2017 SURF http://www.surf.nl
# Copyright (C) 2017 Stichting Kennisnet http://www.kennisnet.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Transparent
class PathRename(Transparent):
def __init__(self, rename):
Transparent.__init__(self)
self._rename = rename
def handleRequest(self, path, *args, **kwargs):
originalPath = kwargs.pop('originalPath', path)
yield self.all.handleRequest(path=self._rename(path), originalPath=originalPath, *args, **kwargs)
| seecr/meresco-components | meresco/components/http/pathrename.py | Python | gpl-2.0 | 1,768 | 0.003394 |
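# A hedged composition sketch for PathRename above: it only rewrites `path` before
# delegating handleRequest to its observers. The prefix-stripping callable and the
# downstream observer are illustrative, not part of the component.
strip_api = PathRename(rename=lambda path: path[len('/api'):] if path.startswith('/api') else path)
# strip_api.addObserver(fileServer)             # any observer exposing handleRequest
# responseGenerator = strip_api.handleRequest(path='/api/info')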
#coding=utf8
import thread, time, sys, os, platform
try:
import termios, tty
termios.tcgetattr, termios.tcsetattr
import threading
OS = 'Linux'
except (ImportError, AttributeError):
try:
import msvcrt
OS = 'Windows'
except ImportError:
raise Exception('Mac is currently not supported')
OS = 'Mac'
else:
getch = msvcrt.getwch
else:
def fn():
try:
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
ch = sys.stdin.read(1)
except:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
raise Exception
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
getch = fn
CMD_HISTORY = 30
class ChatLikeCMD():
def __init__(self, header = 'LittleCoder', symbol = '>', inPip = None, inputMaintain = False):
self.strBuff = []
self.cmdBuff = []
self.historyCmd = -1
self.cursor = 0
self.inPip = [] if inPip == None else inPip
self.outPip = []
self.isLaunch = False
self.isPause = False
self.header = header
self.symbol = symbol
self.inputMaintain = inputMaintain
def reprint_input(self):
sys.stdout.write(self.header + self.symbol)
if self.strBuff:
for i in self.strBuff: sys.stdout.write(i)
sys.stdout.flush()
def getch(self):
c = getch()
return c if c != '\r' else '\n'
def get_history_command(self, direction):
if direction == 'UP':
if self.historyCmd < CMD_HISTORY - 1 and self.historyCmd < len(self.cmdBuff) - 1: self.historyCmd += 1
else:
if self.historyCmd == 0: return ''
if self.historyCmd > 0: self.historyCmd -= 1
if -1 < self.historyCmd < len(self.cmdBuff): return self.cmdBuff[self.historyCmd]
def output_command(self, s):
self.outPip.append(s if isinstance(s, unicode) else s.decode(sys.stdin.encoding))
if len(self.cmdBuff) >= CMD_HISTORY: self.cmdBuff = self.cmdBuff[::-1].pop()[::-1]
self.cmdBuff.append(s)
def print_thread(self):
while self.isLaunch:
if self.inPip:
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
print self.inPip.pop()
# linux special
sys.stdout.write('\r')
sys.stdout.flush()
self.reprint_input()
time.sleep(0.01)
def fast_input_test(self):
timer = threading.Timer(0.001, thread.interrupt_main)
c = None
try:
timer.start()
c = getch()
except:
pass
timer.cancel()
return c
def process_direction_char(self, c):
if OS == 'Windows':
if ord(c) == 72:
c = 'A'
elif ord(c) == 80:
c = 'B'
elif ord(c) == 77:
c = 'C'
elif ord(c) == 75:
c = 'D'
if ord(c) == 68: # LEFT
self.process_char('\b')
return
# cursor bugs
if self.cursor > 0:
if OS == 'Windows':
sys.stdout.write(chr(224) + chr(75))
else:
sys.stdout.write(chr(27) + '[C')
self.cursor -= 1
elif ord(c) == 67: # RIGHT
return
# cursor bugs
if self.cursor < len(self.strBuff):
if OS == 'Windows':
sys.stdout.write(chr(224) + chr(77))
else:
sys.stdout.write(chr(27) + '[D')
self.cursor += 1
elif ord(c) == 65: # UP
hc = self.get_history_command('UP')
if not hc is None:
self.strBuff = [i for i in hc]
self.cursor = len(hc)
sys.stdout.write('\r' + ' ' * 50 + '\r')
self.reprint_input()
elif ord(c) == 66: # DOWN
hc = self.get_history_command('DOWN')
if not hc is None:
self.strBuff = [i for i in hc]
self.cursor = len(hc)
sys.stdout.write('\r' + ' ' * 50 + '\r')
self.reprint_input()
else:
raise Exception(c)
def process_char(self, c):
if ord(c) == 27: # Esc
if OS == 'Linux':
fitc1 = self.fast_input_test()
if ord(fitc1) == 91:
fitc2 = self.fast_input_test()
if 65 <= ord(fitc2) <= 68:
self.process_direction_char(fitc2)
return
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
self.reprint_input()
self.outPip.append(c)
time.sleep(0.02)
if 'fitc1' in dir():
self.process_char(fitc1)
self.cursor += 1
if 'fitc2' in dir():
self.process_char(fitc2)
self.cursor += 1
elif ord(c) == 3: # Ctrl+C
self.stop()
self.isPause = True
if raw_input('Exit?(y) ') == 'y':
sys.stdout.write('Command Line Exit')
else:
self.start()
self.isPause = False
elif ord(c) in (8, 127): # Backspace
if self.strBuff:
if ord(self.strBuff[-1]) < 128:
sys.stdout.write('\b \b')
else:
sys.stdout.write('\b\b \b')
if OS == 'Linux':
self.strBuff.pop()
self.strBuff.pop()
self.strBuff.pop()
self.cursor -= 1
elif c == '\n':
if self.strBuff:
if self.inputMaintain:
sys.stdout.write(c)
else:
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
self.reprint_input()
self.output_command(''.join(self.strBuff))
self.strBuff = []
self.historyCmd = -1
elif ord(c) == 224: # Windows direction
if OS == 'Windows':
direction = self.getch()
self.process_direction_char(direction)
else:
sys.stdout.write(c)
sys.stdout.flush()
self.strBuff.append(c)
self.cursor += 1
def command_thread(self):
c = None
while self.isLaunch:
c = self.getch()
self.process_char(c)
time.sleep(0.01)
def start(self):
self.isLaunch = True
thread.start_new_thread(self.print_thread, ())
self.reprint_input()
thread.start_new_thread(self.command_thread, ())
def stop(self):
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
self.isLaunch = False
def print_line(self, msg = None):
self.inPip.append(msg)
def clear(self):
os.system('cls' if platform.system() == 'Windows' else 'clear')
self.reprint_input()
def get_command_pip(self):
return self.outPip
def set_header(self, header):
self.header = header
if __name__ == '__main__':
c = ChatLikeCMD()
s = c.get_command_pip()
c.start()
def loopinput(c):
while True:
c.print_line('LOOP INPUT......')
time.sleep(3)
thread.start_new_thread(loopinput, (c,))
while c.isLaunch or c.isPause:
if s:
c.print_line(s.pop())
time.sleep(0.01)
| littlecodersh/EasierLife | Plugins/ChatLikeCMD/ChatLikeCMD.py | Python | mit | 7,974 | 0.007399 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Projects Issue extensions for user roles',
'version': '1.0',
'category': 'Project Management',
'summary': 'Extend Project user roles to support more complex use cases',
'description': """\
Also implements the Project user role extensions to the Project Issue
documents.
This module is automatically installed if the Issue Tracker is also installed.
Please refer to the ``project_baseuser`` module for more details.
""",
'author': "Daniel Reis,Odoo Community Association (OCA)",
'license': 'AGPL-3',
'depends': [
'project_issue',
'project_baseuser',
],
'data': [
'security/ir.model.access.csv',
'security/project_security.xml',
],
'installable': True,
'auto_install': True,
}
| raycarnes/project | project_issue_baseuser/__openerp__.py | Python | agpl-3.0 | 1,661 | 0 |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.actions import create_stream_if_needed
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Create a stream, and subscribe all active users (excluding bots).
This should be used for TESTING only, unless you understand the limitations of
the command."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser, True, "realm in which to create the stream")
parser.add_argument('stream_name', metavar='<stream name>', type=str,
help='name of stream to create')
def handle(self, *args: Any, **options: str) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
stream_name = options['stream_name']
create_stream_if_needed(realm, stream_name)
| tommyip/zulip | zerver/management/commands/create_stream.py | Python | apache-2.0 | 919 | 0.002176 |
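# A hedged invocation sketch for the management command above; the realm option is the
# one added by add_realm_args (commonly -r/--realm in Zulip) and the names are placeholders.
#
#   ./manage.py create_stream -r zulip 'test stream'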
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_lok_nymshenchman_medium.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/building/poi/shared_lok_nymshenchman_medium.py | Python | mit | 454 | 0.046256 |