commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses 13 values) | lang (stringclasses 23 values)
---|---|---|---|---|---|---|---|---|
57714fd6838f48920f7093a24ec4d85abf4278ee | Fix merge issue | linkedin/naarad,linkedin/naarad,richardhsu/naarad,kilink/naarad,forever342/naarad,linkedin/naarad,kilink/naarad,richardhsu/naarad,kilink/naarad,richardhsu/naarad,kilink/naarad,linkedin/naarad,forever342/naarad,forever342/naarad,forever342/naarad,richardhsu/naarad | src/naarad/naarad_imports.py | src/naarad/naarad_imports.py | # coding=utf-8
"""
© 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from naarad.graphing import matplotlib_naarad
from naarad.metrics.jmeter_metric import JmeterMetric
from naarad.reporting.report import Report
#Custom metrics
metric_classes = {
    #'MyMetric' : MyMetricParserClass
    'JMETER' : JmeterMetric
}

graphing_modules = {
    'matplotlib' : matplotlib_naarad
}

reporting_modules = {
    'report' : Report
}

important_sub_metrics_import = {
    'GC' : ('GC', 'used'),
    'SAR-cpuusage' : ('%sys', '%usr')
}
| # coding=utf-8
"""
© 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from naarad.graphing import matplotlib_naarad
from naarad.metrics.jmeter_metric import JmeterMetric
from naarad.reporting.report import Report
#Custom metrics
metric_classes = {
    #'MyMetric' : MyMetricParserClass
    'JMETER' : JmeterMetric
}

graphing_modules = {
    'matplotlib' : matplotlib_naarad
}

<<<<<<< HEAD
reporting_modules = {
    'report' : Report
}
=======
important_sub_metrics_import = {
    'GC' : ('GC', 'used'),
    'SAR-cpuusage' : ('%sys', '%usr')
}
>>>>>>> upstream/master
| apache-2.0 | Python |
6de41e5acfe55b6cb7698f81e3031079a530b1af | test for cli mode | dariusbakunas/rawdisk | tests/test_cli_mode.py | tests/test_cli_mode.py | import unittest
from unittest.mock import Mock

from rawdisk.ui.cli.cli_mode import CliMode, CliShell


class CliModeTest(unittest.TestCase):
    def test_initialize_loads_fs_plugins(self):
        session = Mock()
        cli = CliShell(session=session)
        cli.initialize()
        session.load_plugins.assert_called_once_with()
| bsd-3-clause | Python |
|
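The test above leans entirely on `unittest.mock.Mock` auto-recording: attributes spring into existence on first access, calls to them are recorded rather than executed, and `assert_called_once_with()` checks both the call count and the arguments. A minimal standalone sketch of that mechanism (names here are illustrative, not from rawdisk):

```python
from unittest.mock import Mock

session = Mock()            # attributes are created on access
session.load_plugins()      # the call is recorded, nothing real runs
session.load_plugins.assert_called_once_with()  # passes: one call, no arguments

session.load_plugins()
# a second call would now make the same assertion raise AssertionError
```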
be0cb304047c7a410eac577b8aa2765747991100 | add script to summarise output | martinjuckes/ceda_cc,martinjuckes/ceda_cc | summary.py | summary.py |
import string, sys, glob
idir = sys.argv[1]
fl = glob.glob( '%s/*.txt' % idir )
ee = {}
for f in fl:
    for l in open(f).readlines():
        if string.find(l, 'FAILED') != -1:
            bits = string.split(l, ':' )
            if len(bits) > 3:
                code = bits[0]
                msg = bits[3]
                if code not in ee.keys():
                    ee[code] = [0,msg]
                ee[code][0] += 1
                if ee[code][1] != msg:
                    print 'code %s occurs with multiple messages: %s, %s' % (code,ee[code][1],msg)
            else:
                print bits

keys = ee.keys()
keys.sort()
for k in keys:
    print k,ee[k]
| bsd-3-clause | Python |
|
6d90ccd7d6f03630106f78ec7d75666429e26e45 | Add an example workloads module | gem5/gem5,gem5/gem5,gem5/gem5,gem5/gem5,gem5/gem5,gem5/gem5,gem5/gem5 | configs/example/arm/workloads.py | configs/example/arm/workloads.py | # Copyright (c) 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from __future__ import absolute_import
import m5
from m5.objects import *
from m5.options import *
from common.SysPaths import binary, disk
class ArmBaremetal(ArmFsWorkload):
    """ Baremetal workload """
    atags_addr = 0

    def __init__(self, obj, system, **kwargs):
        super(ArmBaremetal, self).__init__(**kwargs)
        self.object_file = obj


class ArmTrustedFirmware(ArmFsWorkload):
    """
    Arm Trusted Firmware (TFA) workload.

    It models the firmware design described at:

    https://trustedfirmware-a.readthedocs.io/en/latest/design/firmware-design.html

    The workload expects to find a set of firmware images under
    the M5_PATH/binaries path. Those images are:

    * bl1.bin (BL1 = Stage 1 Bootloader)
    * fip.bin (FIP = Firmware Image Package):
      BL2, BL31, BL33 binaries compiled under a single package

    These are the results of the compilation of Arm Trusted Firmware.

    https://github.com/ARM-software/arm-trusted-firmware
    """
    atags_addr = 0

    def __init__(self, obj, system, **kwargs):
        super(ArmTrustedFirmware, self).__init__(**kwargs)

        self.extras = [ binary('bl1.bin'), binary('fip.bin'), ]
        self.extras_addrs = [
            system.realview.bootmem.range.start,
            system.realview.flash0.range.start
        ]

        # Arm Trusted Firmware will provide a PSCI implementation
        system._have_psci = True
| bsd-3-clause | Python |
|
d38148097b96ecf7681d0e6c5f7dbc0de5c4b16b | Create backpackI.py | UmassJin/Leetcode | LintCode/backpackI.py | LintCode/backpackI.py | '''
Given n items with size Ai, an integer m denotes the size of a backpack. How full you can fill this backpack?
Have you met this question in a real interview? Yes
Example
If we have 4 items with size [2, 3, 5, 7], the backpack size is 11, we can select [2, 3, 5], so that the max size we can fill this backpack is 10. If the backpack size is 12. we can select [2, 3, 7] so that we can fulfill the backpack.
You function should return the max size we can fill in the given backpack.
Note
You can not divide any item into small pieces.
Challenge
O(n x m) time and O(m) memory.
O(n x m) memory is also acceptable if you do not know how to optimize memory.
'''
class Solution_MLE:
    # @param m: An integer m denotes the size of a backpack
    # @param A: Given n items with size A[i]
    # @return: The maximum size
    def backPack(self, m, A):
        if not A: return 0
        n = len(A)
        dp = [[0 for i in xrange(m+1)] for j in xrange(n+1)]
        for i in xrange(1, n+1):
            for j in xrange(1, m+1):
                if j < A[i-1]:
                    dp[i][j] = dp[i-1][j]
                else:
                    dp[i][j] = max(dp[i-1][j], dp[i-1][j-A[i-1]] + A[i-1])
        return dp[n][m]


class Solution:
    # @param m: An integer m denotes the size of a backpack
    # @param A: Given n items with size A[i]
    # @return: The maximum size
    def backPack(self, m, A):
        if not A: return 0
        n = len(A)
        dp = [0 for i in xrange(m+1)]
        for i in xrange(1, n+1):
            for j in xrange(m, 0, -1):
                if j >= A[i-1]:
                    dp[j] = max(dp[j], dp[j-A[i-1]] + A[i-1])
        return dp[m]

# dp[i][j] means we put the first i items for backpack j
# function: if j >= A[i-1], dp[i][j] = max(dp[i-1][j], dp[i-1][j-A[i-1]] + A[i-1]), put the ith item or not
# result: dp[n][m]
| mit | Python |
|
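The space-optimized `Solution` iterates `j` from `m` down to `1` so that `dp[j-A[i-1]]` still holds the previous item's row when it is read; a forward pass would let item `i` be counted more than once. A quick usage check against the examples in the docstring:

```python
# Matches the examples from the problem statement above.
print Solution().backPack(11, [2, 3, 5, 7])  # 10, from picking [2, 3, 5]
print Solution().backPack(12, [2, 3, 5, 7])  # 12, from picking [2, 3, 7]
```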
6a65640d1567d3cf2a9dac232e705e4697022987 | add migration for hidden col | neynt/tsundiary,neynt/tsundiary,neynt/tsundiary,neynt/tsundiary | migrations/versions/388d0cc48e7c_.py | migrations/versions/388d0cc48e7c_.py | """empty message
Revision ID: 388d0cc48e7c
Revises: 21a633e449ce
Create Date: 2014-11-13 10:49:45.512414
"""
# revision identifiers, used by Alembic.
revision = '388d0cc48e7c'
down_revision = '21a633e449ce'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('post', sa.Column('hidden', sa.Integer(), nullable=True))
    ### end Alembic commands ###


def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('post', 'hidden')
    ### end Alembic commands ###
| mit | Python |
|
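Alembic normally drives `upgrade()`/`downgrade()` through its CLI, but the same revisions can also be applied programmatically. A minimal sketch (the `alembic.ini` path is an assumption about the project layout):

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")              # assumed location of the Alembic config
command.upgrade(cfg, "388d0cc48e7c")     # runs upgrade() of each pending revision
command.downgrade(cfg, "21a633e449ce")   # runs downgrade() back to the parent revision
```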
9d0e85a10b1073000d22500938e3d8d65107f062 | Update migrations | fernando24164/flask_api,fernando24164/flask_api | migrations/versions/70e630638c64_.py | migrations/versions/70e630638c64_.py | """empty message
Revision ID: 70e630638c64
Revises:
Create Date: 2017-05-23 13:19:47.177767
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '70e630638c64'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('stations',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('latitude', sa.String(length=255), nullable=True),
        sa.Column('longitude', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('description')
    )
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=55), nullable=True),
        sa.Column('pwdhash', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_name'), 'users', ['name'], unique=False)
    op.create_table('association',
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('station_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['station_id'], ['stations.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('association')
    op.drop_index(op.f('ix_users_name'), table_name='users')
    op.drop_table('users')
    op.drop_table('stations')
    # ### end Alembic commands ###
| mit | Python |
|
c934c77392b98e7625bc5e03ea6c3c7960cdcd5d | Create syslog_parser.py | seefor/rpzsyslog | syslog_parser.py | syslog_parser.py | import re
import sys
import argparse
__author__ = 'sif.baksh@gmail.com'
__version__ = "$Revision: 1.6 $"
'''
TODO
- nothing
USAGE
python syslog_parser.py -i syslog.log -o customerxyz.txt
'''
# This will allow us to pass command line arguments
# FOR HELP - python syslog_test.py --h
parser = argparse.ArgumentParser(description='RPZ Syslog Parser')
parser.add_argument('-i', '--input', help='Input file name', required=True)
parser.add_argument('-o', '--output', help='Output file name', required=True)
args = parser.parse_args()
input_file = open(str(args.input))
n = 0
output_file = open(str(args.output), "w")
for line in iter(input_file):
    m = re.search('(?<=\s\[A\]\svia\s)(\S*)(?=\"\"\"$)', line)
    if m:
        n = n + 1
        print m.group(1)
        output_file.write(m.group(1))
        output_file.write("\n")
print "[+] Found %s domains in : %s" % (n, str(args.input))
print "[+] Please check %s for the output!" % str(args.output)
# # show values ##
print ("Input file: %s" % args.input )
print ("Output file: %s" % args.output )
output_file.close()
input_file.close()
| unlicense | Python |
|
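The script's regular expression combines a fixed-width lookbehind (` [A] via `) with a lookahead for a literal `"""` at end of line, so only the domain itself is captured. A standalone check against a made-up line (the real RPZ log format may differ):

```python
import re

line = 'rpz QNAME rewrite bad.example [A] via bad.example"""'  # hypothetical log line
m = re.search('(?<=\s\[A\]\svia\s)(\S*)(?=\"\"\"$)', line)
if m:
    print m.group(1)  # bad.example
```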
375f63b76fccc89ce0ee9b4246e5fb9a2400d1eb | Add Austrian Versicherungsnummer | arthurdejong/python-stdnum,arthurdejong/python-stdnum,arthurdejong/python-stdnum | stdnum/at/vnr.py | stdnum/at/vnr.py | # vnr.py - functions for handling Austrian social security numbers
# coding: utf-8
#
# Copyright (C) 2018 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""VNR, SVNR, VSNR (Versicherungsnummer, Austrian social security number).
The Austian Versicherungsnummer is a personal identification number used for
social security. The number is 10 digits long and consists of a 3 digit
serial, a check digit and 6 digits that usually specify the person's birth
date.
More information:
* https://de.wikipedia.org/wiki/Sozialversicherungsnummer#Österreich
>>> validate('1237 010180')
'1237010180'
>>> validate('2237 010180')
Traceback (most recent call last):
...
InvalidChecksum: ...
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
    """Convert the number to the minimal representation. This strips the
    number of any valid separators and removes surrounding whitespace."""
    return clean(number, ' ')


def calc_check_digit(number):
    """Calculate the check digit. The fourth digit in the number is
    ignored."""
    weights = (3, 7, 9, 0, 5, 8, 4, 2, 1, 6)
    return str(sum(w * int(n) for w, n in zip(weights, number)) % 11)


def validate(number):
    """Check if the number is a valid social security number. This checks
    the length, formatting and check digit."""
    number = compact(number)
    if not number.isdigit() or number.startswith('0'):
        raise InvalidFormat()
    if len(number) != 10:
        raise InvalidLength()
    if calc_check_digit(number) != number[3]:
        raise InvalidChecksum()
    return number


def is_valid(number):
    """Check if the number is a valid social security number."""
    try:
        return bool(validate(number))
    except ValidationError:
        return False
| lgpl-2.1 | Python |
|
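To make the check-digit arithmetic concrete, here is the weighted sum for the docstring's valid example, `1237010180`. Digit 4 carries weight 0, so the check digit is excluded from its own computation:

```python
# digits:   1  2  3  7  0  1  0  1  8  0
# weights:  3  7  9  0  5  8  4  2  1  6
# sum = 3*1 + 7*2 + 9*3 + 8*1 + 2*1 + 1*8 = 62;  62 % 11 = 7 == number[3]
print(calc_check_digit('1237010180'))  # '7', so validate() accepts the number
```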
33d3f3f0805fb2e34144eec1870442427c2a12b5 | Add initial config management interface for the wheel module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/wheel/config.py | salt/wheel/config.py | '''
Manage the master configuration file
'''
# Import python libs
import os
# Import third party libs
import yaml
# Import salt libs
import salt.config
def values():
    '''
    Return the raw values of the config file
    '''
    data = salt.config.master_config(__opts__['conf_file'])
    data.pop('aes')
    data.pop('token_dir')
    return data


def apply(key, value):
    '''
    Set a single key
    '''
    path = __opts__['conf_file']
    if os.path.isdir(path):
        path = os.path.join(path, 'master')
    data = values()
    data[key] = value
    yaml.dump(data, default_flow_style=False)
| apache-2.0 | Python |
|
9e835d341513d7477b05f19ec2b72499b170db40 | Add initial libguestfs module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/libguestfs.py | salt/modules/libguestfs.py | '''
Interact with virtual machine images via libguestfs
:depends: - libguestfs
'''
# Import Salt libs
import salt.utils
def __virtual__():
    '''
    Only load if libguestfs python bindings are installed
    '''
    if salt.utils.which('guestmount'):
        return 'guestfs'
    return False


def seed(location, id_='', config=None):
    '''
    Seed a vm image before booting it

    CLI Example::

        salt '*' guestfs.seed /tmp/image.qcow2
    '''
    if config is None:
        config = {}
| apache-2.0 | Python |
|
e2c46d78cc4efe2d8778b0580b37ac95299c4ee1 | Implement DefaultConfigMixin | coala/corobo,coala/corobo | utils/mixin.py | utils/mixin.py | from itertools import chain
class DefaultConfigMixin():
    @property
    def _default_config(self):
        if (hasattr(self.bot_config, 'DEFAULT_CONFIG') and
                self.name in self.bot_config.DEFAULT_CONFIG):
            return self.bot_config.DEFAULT_CONFIG[self.name]

    def __init__(self, bot, name=None):
        super().__init__(bot, name=name)
        default_config = self._default_config
        if default_config and not hasattr(self, 'config'):
            self.configure(default_config)

    def get_configuration_template(self):
        default_config = self._default_config
        if default_config:
            return default_config
        elif self.CONFIG_TEMPLATE:
            return self.CONFIG_TEMPLATE

    def configure(self, configuration):
        default_config = self._default_config
        if configuration and default_config:
            config = dict(chain(
                default_config.items(),
                configuration.items()))
        elif configuration:
            config = dict(chain(self.CONFIG_TEMPLATE.items(),
                                configuration.items()))
        elif default_config:
            config = default_config
        else:
            config = self.CONFIG_TEMPLATE
        self.config = config
| mit | Python |
|
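The `configure()` merge relies on `dict()` keeping the last value when `chain()` yields duplicate keys, so the configuration passed at runtime overrides the defaults. A self-contained illustration (the keys and values are made up):

```python
from itertools import chain

defaults = {'room': 'general', 'interval': 60}   # hypothetical DEFAULT_CONFIG entry
overrides = {'interval': 10}                     # hypothetical runtime configuration

merged = dict(chain(defaults.items(), overrides.items()))
assert merged == {'room': 'general', 'interval': 10}  # later items win
```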
1c631f8a6426a50e2e86d77a9b2729e102c5ad32 | add DecomposeComponentsFilter | googlei18n/ufo2ft,googlefonts/ufo2ft,jamesgk/ufo2ft,moyogo/ufo2ft,jamesgk/ufo2fdk | Lib/ufo2ft/filters/decomposeComponents.py | Lib/ufo2ft/filters/decomposeComponents.py | from __future__ import (
    print_function, division, absolute_import, unicode_literals)
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.misc.transform import Transform, Identity
from fontTools.pens.transformPen import TransformPen
from ufo2ft.filters import BaseFilter
class DecomposeComponentsFilter(BaseFilter):

    def filter(self, glyph, glyphSet=None):
        if not glyph.components:
            return False
        _deepCopyContours(glyphSet, glyph, glyph, Transform())
        glyph.clearComponents()
        return True


def _deepCopyContours(glyphSet, parent, component, transformation):
    """Copy contours from component to parent, including nested components."""
    for nested in component.components:
        _deepCopyContours(
            glyphSet, parent, glyphSet[nested.baseGlyph],
            transformation.transform(nested.transformation))
    if component != parent:
        if transformation == Identity:
            pen = parent.getPen()
        else:
            pen = TransformPen(parent.getPen(), transformation)
            # if the transformation has a negative determinant, it will
            # reverse the contour direction of the component
            xx, xy, yx, yy = transformation[:4]
            if xx*yy - xy*yx < 0:
                pen = ReverseContourPen(pen)
        component.draw(pen)
| mit | Python |
|
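The determinant test in `_deepCopyContours` flags transforms that mirror the outline: a flip has a negative determinant and reverses the winding of the copied contours, which is why they must be re-reversed with `ReverseContourPen`. A tiny check of that condition:

```python
# A horizontal mirror, in the (xx, xy, yx, yy) order used above:
xx, xy, yx, yy = -1, 0, 0, 1
assert xx*yy - xy*yx < 0   # negative determinant -> ReverseContourPen is needed

# A plain rotation keeps orientation:
xx, xy, yx, yy = 0, 1, -1, 0   # 90-degree rotation
assert xx*yy - xy*yx > 0
```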
b7e024913d1d1bc87306f4a85b8737a8d5c35ec7 | add XML validation utility | ocefpaf/pycsw,benhowell/pycsw,PublicaMundi/pycsw,kevinpdavies/pycsw,bukun/pycsw,geopython/pycsw,tomkralidis/pycsw,bukun/pycsw,rouault/pycsw,ckan-fcd/pycsw-fcd,tomkralidis/pycsw,geopython/pycsw,mwengren/pycsw,kalxas/pycsw,rouault/pycsw,geopython/pycsw,ricardogsilva/pycsw,ingenieroariel/pycsw,benhowell/pycsw,kalxas/pycsw,mwengren/pycsw,ricardogsilva/pycsw,tomkralidis/pycsw,kevinpdavies/pycsw,bukun/pycsw,ocefpaf/pycsw,kalxas/pycsw,ricardogsilva/pycsw,ingenieroariel/pycsw,ckan-fcd/pycsw-fcd,PublicaMundi/pycsw | sbin/validate_xml.py | sbin/validate_xml.py | #!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2011 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import sys
from lxml import etree
if len(sys.argv) < 3:
    print 'Usage: %s <xml> <xsd>' % sys.argv[0]
    sys.exit(1)

print 'Validating %s against schema %s' % (sys.argv[1], sys.argv[2])

schema = etree.XMLSchema(etree.parse(sys.argv[2]))
parser = etree.XMLParser(schema=schema)

try:
    valid = etree.parse(sys.argv[1], parser)
    print 'Valid XML document'
except Exception, err:
    print 'ERROR: %s' % str(err)
| mit | Python |
|
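The key detail in the script is that attaching an `XMLSchema` to the parser makes validation happen during `etree.parse`, raising on the first violation. A self-contained sketch with a toy schema:

```python
from io import BytesIO
from lxml import etree

xsd = b'''<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
  <xs:element name="a" type="xs:string"/>
</xs:schema>'''
parser = etree.XMLParser(schema=etree.XMLSchema(etree.parse(BytesIO(xsd))))

etree.parse(BytesIO(b'<a>ok</a>'), parser)   # validates and parses
# etree.parse(BytesIO(b'<b/>'), parser)      # would raise etree.XMLSyntaxError
```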
cf3ca764c571a51952cd7c98c9752aedc701c3eb | Drop the status column | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0053_perform_drop_status_column.py | migrations/versions/0053_perform_drop_status_column.py | """empty message
Revision ID: 0053_perform_drop_status_column
Revises: 0052_drop_jobs_status
Create Date: 2016-08-25 15:56:31.779399
"""
# revision identifiers, used by Alembic.
revision = '0053_perform_drop_status_column'
down_revision = '0052_drop_jobs_status'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    op.drop_column('jobs', 'status')


def downgrade():
    op.add_column('jobs', sa.Column('status', postgresql.ENUM('pending', 'in progress', 'finished', 'sending limits exceeded', name='job_status_types'), autoincrement=False, nullable=True))
| mit | Python |
|
653221002d97f4ab646b11c01016763550912036 | Update word-ladder.py | kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode | Python/word-ladder.py | Python/word-ladder.py | # Time: O(n * d), n is length of string, d is size of dictionary
# Space: O(d)
#
# Given two words (start and end), and a dictionary, find the length of shortest transformation sequence from start to end, such that:
#
# Only one letter can be changed at a time
# Each intermediate word must exist in the dictionary
# For example,
#
# Given:
# start = "hit"
# end = "cog"
# dict = ["hot","dot","dog","lot","log"]
# As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# return its length 5.
#
# Note:
# Return 0 if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
#
# BFS
class Solution(object):
    def ladderLength(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int
        """
        distance, cur, visited, lookup = 0, [beginWord], set([beginWord]), set(wordList)
        while cur:
            _next = []
            for word in cur:
                if word == endWord:
                    return distance + 1
                for i in xrange(len(word)):
                    for j in 'abcdefghijklmnopqrstuvwxyz':
                        candidate = word[:i] + j + word[i + 1:]
                        if candidate not in visited and candidate in lookup:
                            _next.append(candidate)
                            visited.add(candidate)
            distance += 1
            cur = _next
        return 0


if __name__ == "__main__":
    print Solution().ladderLength("hit", "cog", set(["hot", "dot", "dog", "lot", "log"]))
    print Solution().ladderLength("hit", "cog", set(["hot", "dot", "dog", "lot", "log", "cog"]))
| # Time: O(n * d), n is length of string, d is size of dictionary
# Space: O(d)
#
# Given two words (start and end), and a dictionary, find the length of shortest transformation sequence from start to end, such that:
#
# Only one letter can be changed at a time
# Each intermediate word must exist in the dictionary
# For example,
#
# Given:
# start = "hit"
# end = "cog"
# dict = ["hot","dot","dog","lot","log"]
# As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# return its length 5.
#
# Note:
# Return 0 if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
#
# BFS
class Solution:
    # @param start, a string
    # @param end, a string
    # @param dict, a set of string
    # @return an integer
    def ladderLength(self, start, end, word_list):
        distance, cur, visited = 0, [start], set([start])
        while cur:
            _next = []
            for word in cur:
                if word == end:
                    return distance + 1
                for i in xrange(len(word)):
                    for j in 'abcdefghijklmnopqrstuvwxyz':
                        candidate = word[:i] + j + word[i + 1:]
                        if candidate not in visited and candidate in word_list:
                            _next.append(candidate)
                            visited.add(candidate)
            distance += 1
            cur = _next
        return 0


if __name__ == "__main__":
    print Solution().ladderLength("hit", "cog", set(["hot", "dot", "dog", "lot", "log"]))
    print Solution().ladderLength("hit", "cog", set(["hot", "dot", "dog", "lot", "log", "cog"]))
| mit | Python |
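Each BFS level expands every word into all single-letter mutations before filtering against the dictionary, which is where the O(n * d) cost in the header comment comes from. The candidate generation in isolation:

```python
word = 'hit'
candidates = set(word[:i] + c + word[i+1:]
                 for i in xrange(len(word))
                 for c in 'abcdefghijklmnopqrstuvwxyz') - set([word])
print len(candidates)      # 75 mutations for a 3-letter word (3*26 minus the no-ops)
print 'hot' in candidates  # True, so 'hot' joins the next BFS level
```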
96e0f1f4946e4663991f6af1dfb333b064721df2 | Add cctbx_progs/emma_shelxd_lst.py to execute phenix.emma for all solutions in _fa.lst of SHELXD. | keitaroyam/yamtbx,keitaroyam/yamtbx,keitaroyam/yamtbx,keitaroyam/yamtbx | cctbx_progs/emma_shelxd_lst.py | cctbx_progs/emma_shelxd_lst.py | #!/usr/bin/env phenix.python
# The original code is iotbx/command_line/emma.py
from __future__ import division
from iotbx import crystal_symmetry_from_any
from iotbx.option_parser import option_parser
from cctbx import euclidean_model_matching as emma
from iotbx.command_line.emma import get_emma_model
import sys, os, re
def get_emma_models_from_lst(file_name, crystal_symmetry):
    read_flag = False
    positions = []
    re_lst_header = re.compile("Try *([0-9]+), CPU *([0-9]+), CC All/Weak *([-0-9\.]+) */ *([-0-9\.]+)")

    for l in open(file_name):
        if l.startswith(" x y z"):
            read_flag = True
            positions = []
        elif read_flag and l.strip() == "":
            read_flag = False
        elif read_flag:
            site = map(float, (l[:8], l[8:16], l[16:24]))
            positions.append(emma.position(str(len(positions)+1), site))
        elif l.startswith(" Try "):
            r = re_lst_header.search(l)
            itry, ccall, ccweak = r.group(1), r.group(3), r.group(4)
            ret = emma.model(crystal_symmetry.special_position_settings(), positions)
            ret.label = "Try%s_CCall_%s_CCweak_%s" % (itry, ccall, ccweak)
            yield (ret, itry, ccall, ccweak)
# get_emma_models_from_lst


def run(args, command_name="emma_shelxd_lst.py"):
    command_line = (option_parser(
        usage=command_name + " [options]"
        +" reference_coordinates shelxd-lst-file",
        description="Example: %s model1.pdb sad_fa.lst" % command_name)
        .enable_symmetry_comprehensive()
        .option(None, "--tolerance",
                action="store",
                type="float",
                default=.5,
                help="match tolerance",
                metavar="FLOAT")
        .option(None, "--diffraction_index_equivalent",
                action="store_true",
                help="Use only if models are diffraction-index equivalent.")
    ).process(args=args, nargs=2)

    crystal_symmetry = command_line.symmetry
    if ( crystal_symmetry.unit_cell() is None
            or crystal_symmetry.space_group_info() is None):
        for file_name in command_line.args:
            crystal_symmetry = crystal_symmetry.join_symmetry(
                other_symmetry=crystal_symmetry_from_any.extract_from(
                    file_name=file_name),
                force=False)

    tolerance = command_line.options.tolerance
    print "Tolerance:", tolerance
    if (tolerance <= 0.):
        raise ValueError, "Tolerance must be greater than zero."
    print

    diffraction_index_equivalent = \
        command_line.options.diffraction_index_equivalent
    if (diffraction_index_equivalent):
        print "Models are diffraction index equivalent."
        print

    emma_ref = get_emma_model(file_name=command_line.args[0],
                              crystal_symmetry=crystal_symmetry)
    emma_ref.show("Reference model")

    emma_others = get_emma_models_from_lst(command_line.args[1], crystal_symmetry)

    print "try CCall CCweak nmatch rms order.min order.max"
    for emma_other, itry, ccall, ccweak in emma_others:
        model_matches = emma.model_matches(model1=emma_ref,
                                           model2=emma_other,
                                           tolerance=tolerance,
                                           models_are_diffraction_index_equivalent=diffraction_index_equivalent)
        print itry, ccall, ccweak,
        if (model_matches.n_matches() == 0):
            print "0 nan nan nan"
        else:
            max_n_pairs = None
            first=True
            for match in model_matches.refined_matches:
                if (max_n_pairs is None or len(match.pairs) > max_n_pairs*0.2):
                    orders = map(lambda x: int(x[1]), match.pairs)
                    print "%3d %.5f %3d %3d" % (len(match.pairs), match.rms, min(orders), max(orders))
                    #match.show()
                    #first=False
                    break
                if (max_n_pairs is None):
                    max_n_pairs = len(match.pairs)


if (__name__ == "__main__"):
    run(args=sys.argv[1:])
| bsd-3-clause | Python |
|
8affa8de7338f08c2bb77e290fd7509440d6eee6 | Add test for issue #169 | GaZ3ll3/numba,shiquanwang/numba,jriehl/numba,shiquanwang/numba,ssarangi/numba,gmarkall/numba,stuartarchibald/numba,stuartarchibald/numba,pitrou/numba,cpcloud/numba,stefanseefeld/numba,cpcloud/numba,seibert/numba,jriehl/numba,shiquanwang/numba,cpcloud/numba,IntelLabs/numba,stuartarchibald/numba,numba/numba,numba/numba,IntelLabs/numba,stuartarchibald/numba,sklam/numba,ssarangi/numba,IntelLabs/numba,pitrou/numba,sklam/numba,stonebig/numba,jriehl/numba,pombredanne/numba,pitrou/numba,ssarangi/numba,GaZ3ll3/numba,GaZ3ll3/numba,pitrou/numba,stefanseefeld/numba,pombredanne/numba,gmarkall/numba,numba/numba,gdementen/numba,stefanseefeld/numba,jriehl/numba,stonebig/numba,stonebig/numba,gmarkall/numba,pombredanne/numba,jriehl/numba,pitrou/numba,gdementen/numba,seibert/numba,stefanseefeld/numba,ssarangi/numba,cpcloud/numba,cpcloud/numba,gdementen/numba,IntelLabs/numba,pombredanne/numba,GaZ3ll3/numba,stuartarchibald/numba,sklam/numba,gmarkall/numba,seibert/numba,stonebig/numba,gmarkall/numba,seibert/numba,numba/numba,pombredanne/numba,gdementen/numba,stefanseefeld/numba,numba/numba,seibert/numba,stonebig/numba,sklam/numba,gdementen/numba,IntelLabs/numba,ssarangi/numba,sklam/numba,GaZ3ll3/numba | numba/tests/issues/test_issue_169.py | numba/tests/issues/test_issue_169.py | # -*- coding: utf-8 -*-
"""
Test binding of autojit methods.
"""
from __future__ import print_function, division, absolute_import
from numba import *
class A(object):

    @autojit
    def a(self, arg):
        return self * arg

    def __mul__(self, other):
        return 10 * other


assert A().a(10) == 100
| bsd-2-clause | Python |
|
d523ea99145941f35c00aecfbcdc18101645358b | Add tests for feed import | stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten | features/imports/tests/test_feed.py | features/imports/tests/test_feed.py | import datetime
from django.conf import settings
import django.utils.timezone
from core import tests
from features.associations import models as associations
from features.memberships.test_mixins import MemberMixin, OtherMemberMixin
from ..management.commands.import_feeds import (
import_from_feed, parse_feed_url_from_website_content)
FEED_LINK_EXAMPLES = (
    ('<link rel="alternate" type="application/rss+xml" title="Peter Weiss Haus » Feed" '
     'href="https://peterweisshaus.de/feed/" />"', 'https://peterweisshaus.de/feed/'),
)

FEED_CONTENT_TEMPLATE = (
    '<?xml version="1.0" encoding="UTF-8"?><rss version="2.0">'
    '<channel><title>Channel Title</title><link>https://example.org</link>'
    '<item><title>{title}</title><link>{url}</link><pubDate>{date}</pubDate>'
    '<description>{description}</description><content>Some Content</content>'
    '</item></channel></rss>')


class DetectFeedURL(tests.Test):

    def test_references(self):
        for snippet, feed_url in FEED_LINK_EXAMPLES:
            with self.subTest(feed_url=feed_url):
                self.assertEqual(parse_feed_url_from_website_content(snippet), feed_url)


class ImportFeedItems(MemberMixin, OtherMemberMixin, tests.Test):

    FEED_DEFAULTS = {
        "title": "First Title", "url": "https://example.org/1/2",
        "date": "Tue, 05 Jun 2018 07:55:15 +0000", "description": "Some Description",
        "content": "Some Content",
    }

    def setUp(self):
        super().setUp()
        settings.GROUPRISE["FEED_IMPORTER_GESTALT_ID"] = self.other_gestalt.id

    def _get_now(self):
        tz = django.utils.timezone.get_current_timezone()
        return datetime.datetime.now(tz=tz)

    def _get_feed_content(self, **kwargs):
        data = dict(self.FEED_DEFAULTS)
        data.update(kwargs)
        if "date" not in data:
            data["date"] = self._get_now()
        if isinstance(data["date"], datetime.datetime):
            data["date"] = data["date"].strftime("%a, %d %b %Y %H:%M:%S %z").strip()
        return FEED_CONTENT_TEMPLATE.format(**data)

    def test_content(self):
        now = self._get_now()
        import_from_feed(self._get_feed_content(date=now), self.gestalt, self.group)
        # TODO: check timezone correctness ("hour" is omitted below)
        self.assertExists(
            associations.Association,
            content__title="First Title",
            content__versions__time_created__year=now.year,
            content__versions__time_created__month=now.month,
            content__versions__time_created__day=now.day,
            content__versions__time_created__minute=now.minute,
            content__versions__time_created__second=now.second,
        )

    def test_ignore_duplicates(self):
        import_from_feed(self._get_feed_content(), self.gestalt, self.group)
        self.assertEqual(associations.Association.objects.count(), 1)
        import_from_feed(self._get_feed_content(), self.gestalt, self.group)
        self.assertEqual(associations.Association.objects.count(), 1)
| agpl-3.0 | Python |
|
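`_get_feed_content` serialises datetimes with the RFC 822-style pattern that RSS `<pubDate>` expects; the format string reproduces the fixture date like so:

```python
import datetime

dt = datetime.datetime(2018, 6, 5, 7, 55, 15, tzinfo=datetime.timezone.utc)
print(dt.strftime("%a, %d %b %Y %H:%M:%S %z"))  # Tue, 05 Jun 2018 07:55:15 +0000
```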
5617038f4ec48915411ec5ce4bf5ae2df98e9e0e | Add dodo dockerkill command | mnieber/dodo_commands | dodo_commands/extra/standard_commands/dockerkill.py | dodo_commands/extra/standard_commands/dockerkill.py | # noqa
from dodo_commands.system_commands import DodoCommand
from plumbum.cmd import docker
from six.moves import input as raw_input
class Command(DodoCommand):  # noqa
    help = ""

    def _containers(self):
        result = []
        for line in docker("ps", "--format", "{{.ID}} {{.Names}} {{.Image}}").split('\n'):
            if line:
                cid, name, image = line.split()
                result.append(dict(name=name, cid=cid, image=image))
        return result

    def handle_imp(self, **kwargs):  # noqa
        while True:
            containers = self._containers()

            print("0 - exit")
            for idx, container in enumerate(containers):
                print("%d - %s" % (idx + 1, container['name']))
            print("999 - all of the above")
            print("\nSelect a container: ")

            raw_choice = int(raw_input())
            kill_all = raw_choice == 999
            choice = raw_choice - 1

            if choice == -1:
                return
            elif kill_all:
                pass
            else:
                containers = [containers[choice]]

            for container in containers:
                self.runcmd(
                    ['docker', 'kill', container['cid']],
                )

            if kill_all:
                return
| mit | Python |
|
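`_containers` relies on the Go-template `--format` flag to get one whitespace-separated record per container, which makes the parsing a plain `split()`. The same parse against a fabricated sample of that output (the container IDs and names are invented):

```python
sample = ("f2a3b4c5d6e7 web nginx\n"
          "0a1b2c3d4e5f db postgres\n")   # hypothetical `docker ps` output

for line in sample.split('\n'):
    if line:
        cid, name, image = line.split()
        print(dict(name=name, cid=cid, image=image))
```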
37ab6b858b6f115bef7a6500a8c81da161e4c659 | Add normal list params to list method for telemetry statistics | dudymas/python-openstacksdk,mtougeron/python-openstacksdk,dtroyer/python-openstacksdk,briancurtin/python-openstacksdk,dudymas/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk,stackforge/python-openstacksdk,dtroyer/python-openstacksdk | openstack/telemetry/v2/statistics.py | openstack/telemetry/v2/statistics.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from openstack.telemetry import telemetry_service
class Statistics(resource.Resource):
    id_attribute = 'meter_name'
    resource_key = 'statistics'
    base_path = '/meters/%(meter_name)s/statistics'
    service = telemetry_service.TelemetryService()

    # Supported Operations
    allow_list = True

    # Path Parameter
    meter_name = resource.prop('meter_name')

    # Properties
    aggregate = resource.prop('aggregate')
    avg = resource.prop('avg')
    count = resource.prop('count')
    duration = resource.prop('duration')
    duration_end = resource.prop('duration_end')
    duration_start = resource.prop('duration_start')
    group_by = resource.prop('groupby')
    max = resource.prop('max')
    min = resource.prop('min')
    period = resource.prop('period')
    period_end = resource.prop('period_end')
    period_start = resource.prop('period_start')
    sum = resource.prop('sum')
    unit = resource.prop('unit')

    @classmethod
    def list(cls, session, limit=None, marker=None, path_args=None,
             paginated=False, **params):
        url = cls._get_url(path_args)
        for stat in session.get(url, service=cls.service, params=params).body:
            yield cls.existing(**stat)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from openstack.telemetry import telemetry_service
class Statistics(resource.Resource):
    id_attribute = 'meter_name'
    resource_key = 'statistics'
    base_path = '/meters/%(meter_name)s/statistics'
    service = telemetry_service.TelemetryService()

    # Supported Operations
    allow_list = True

    # Path Parameter
    meter_name = resource.prop('meter_name')

    # Properties
    aggregate = resource.prop('aggregate')
    avg = resource.prop('avg')
    count = resource.prop('count')
    duration = resource.prop('duration')
    duration_end = resource.prop('duration_end')
    duration_start = resource.prop('duration_start')
    group_by = resource.prop('groupby')
    max = resource.prop('max')
    min = resource.prop('min')
    period = resource.prop('period')
    period_end = resource.prop('period_end')
    period_start = resource.prop('period_start')
    sum = resource.prop('sum')
    unit = resource.prop('unit')

    @classmethod
    def list(cls, session, path_args=None, paginated=False, **params):
        url = cls._get_url(path_args)
        for stat in session.get(url, service=cls.service, params=params).body:
            yield cls.existing(**stat)
| apache-2.0 | Python |
1b217b7990453b4b20c8d255b27825971a32092c | add testing python script for tring out | patelkunal/dotfiles,patelkunal/dotfiles | python/setup.py | python/setup.py | from sys import argv
# main method as entry point
def main(arg_inputs):
    pass


if __name__ == "__main__":
    main(argv[1])
    pass
| apache-2.0 | Python |
|
5dba05e3012b2004fd63f29200ba83b36529da41 | add caesar cipher python exercise | mddenton/Projects-my,mddenton/Projects-my | Text/caesar_cipher.py | Text/caesar_cipher.py | #Global constants for menu choices
SHIFT_ONE = 1
SHIFT_TWO = 2


def caesar(plaintext, shift):
    alphabet=["a","b","c","d","e","f","g","h","i","j","k","l",
              "m","n","o","p","q","r","s","t","u","v","w","x","y","z","A","B",
              "C","D","E","F","G","H","I","J","K","L","M","N","O","P",
              "Q","R","S","T","U","V","W","X","Y","Z"]

    #Create our substitution dictionary
    dic={}
    for i in range(0,len(alphabet)):
        dic[alphabet[i]]=alphabet[(i+shift)%len(alphabet)]

    #Convert each letter of plaintext to the corresponding
    #encrypted letter in our dictionary creating the cryptext
    caesartext=""
    for l in plaintext:
        if plaintext.isupper():
            uppercase = True
        else:
            uppercase = False
    for l in plaintext:
        if uppercase:
            l = l.upper()
            l=dic[l]
        elif l in dic:
            l=dic[l]
        caesartext+=l
    return caesartext


#Get choice
def main():
    user = 0
    user = get_menu_choice ()
    if user == SHIFT_ONE:
        plaintext=input("Enter your text to be coded: ")
        print ("Plaintext:", plaintext )
        print ("Caesartext:",caesar(plaintext,1))
    elif user ==SHIFT_TWO:
        plaintext=input("Enter your text to be coded: ")
        print ("Plaintext:", plaintext )
        print ("Caesartext:",caesar(plaintext,2))


def get_menu_choice():
    user=int(input("For one positive shift, enter 1; for two positive shifts enter 2: "))
    return user


#"Now is the time for all good men to come to the aid of their country"
#"The quick brown fox jumps over the lazy dog"

main()
| mit | Python |
|
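Because the substitution table is built over one combined 52-entry list (the original listing was missing a comma between "D" and "E", fixed above), a shift near the end of the lowercase range wraps into the uppercase letters rather than back to 'a'. A quick check of that behaviour:

```python
print(caesar("abc", 1))  # bcd
print(caesar("xyz", 1))  # yzA  <- 'z' wraps into the uppercase half of the list
```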
f901632651fa0d177a3ba7fc99504aa874eb48b8 | add watcher | thomasjsn/python-sios | watcher.py | watcher.py | class Watcher:
    def __init__(self, topic, client):
        self.value = None
        self.topic = topic
        self.client = client

    def set_value(self, new_value):
        if self.value != new_value:
            self.value = new_value
            self.change()

    def change(self):
        print('change: {} to {}'.format(self.topic, self.value))
        self.client.publish(self.topic, self.value)
| mit | Python |
|
679e969e1cab73139406bdeb5a2f6d03757a89af | Allow Authentication header in CORS | SilentCircle/sentry,BayanGroup/sentry,mitsuhiko/sentry,wujuguang/sentry,pauloschilling/sentry,imankulov/sentry,Kryz/sentry,ifduyue/sentry,hongliang5623/sentry,JackDanger/sentry,wujuguang/sentry,TedaLIEz/sentry,daevaorn/sentry,wong2/sentry,alexm92/sentry,JTCunning/sentry,camilonova/sentry,wong2/sentry,chayapan/django-sentry,pauloschilling/sentry,mvaled/sentry,gencer/sentry,llonchj/sentry,gg7/sentry,rdio/sentry,gencer/sentry,looker/sentry,daevaorn/sentry,chayapan/django-sentry,looker/sentry,kevinlondon/sentry,nicholasserra/sentry,rdio/sentry,Natim/sentry,ewdurbin/sentry,JamesMura/sentry,SilentCircle/sentry,beeftornado/sentry,JackDanger/sentry,jean/sentry,gg7/sentry,jokey2k/sentry,daevaorn/sentry,jokey2k/sentry,korealerts1/sentry,jean/sentry,zenefits/sentry,ifduyue/sentry,vperron/sentry,drcapulet/sentry,vperron/sentry,gg7/sentry,argonemyth/sentry,boneyao/sentry,rdio/sentry,nicholasserra/sentry,drcapulet/sentry,looker/sentry,BuildingLink/sentry,fotinakis/sentry,ngonzalvez/sentry,JamesMura/sentry,looker/sentry,BuildingLink/sentry,NickPresta/sentry,ngonzalvez/sentry,kevinlondon/sentry,alexm92/sentry,BuildingLink/sentry,JamesMura/sentry,songyi199111/sentry,mvaled/sentry,BuildingLink/sentry,vperron/sentry,felixbuenemann/sentry,ewdurbin/sentry,beni55/sentry,JamesMura/sentry,fotinakis/sentry,jean/sentry,gencer/sentry,BayanGroup/sentry,beeftornado/sentry,BayanGroup/sentry,kevinlondon/sentry,boneyao/sentry,JackDanger/sentry,Kryz/sentry,imankulov/sentry,chayapan/django-sentry,korealerts1/sentry,JTCunning/sentry,beni55/sentry,mvaled/sentry,JTCunning/sentry,camilonova/sentry,beni55/sentry,beeftornado/sentry,imankulov/sentry,hongliang5623/sentry,TedaLIEz/sentry,gencer/sentry,Kryz/sentry,boneyao/sentry,argonemyth/sentry,fotinakis/sentry,alex/sentry,alexm92/sentry,SilentCircle/sentry,rdio/sentry,looker/sentry,ngonzalvez/sentry,ewdurbin/sentry,wujuguang/sentry,ifduyue/sentry,daevaorn/sentry,fotinakis/sentry,mvaled/sentry,kevinastone/sentry,korealerts1/sentry,zenefits/sentry,1tush/sentry,mvaled/sentry,pauloschilling/sentry,1tush/sentry,ifduyue/sentry,alex/sentry,zenefits/sentry,argonemyth/sentry,zenefits/sentry,zenefits/sentry,felixbuenemann/sentry,gencer/sentry,NickPresta/sentry,drcapulet/sentry,BuildingLink/sentry,llonchj/sentry,kevinastone/sentry,mitsuhiko/sentry,JamesMura/sentry,songyi199111/sentry,alex/sentry,fuziontech/sentry,hongliang5623/sentry,mvaled/sentry,felixbuenemann/sentry,SilentCircle/sentry,jokey2k/sentry,llonchj/sentry,nicholasserra/sentry,fuziontech/sentry,kevinastone/sentry,camilonova/sentry,NickPresta/sentry,songyi199111/sentry,ifduyue/sentry,jean/sentry,1tush/sentry,jean/sentry,TedaLIEz/sentry,Natim/sentry,Natim/sentry,NickPresta/sentry,wong2/sentry,fuziontech/sentry | sentry/utils/http.py | sentry/utils/http.py | """
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import urllib
from urlparse import urlparse
from sentry.conf import settings
from sentry.plugins.helpers import get_option
def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe version of safe_urlencode

    The stdlib safe_urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fail down to ascii.
    """
    # Snippet originally from pysolr: https://github.com/toastdriven/pysolr
    if hasattr(params, "items"):
        params = params.items()

    new_params = list()

    for k, v in params:
        k = k.encode("utf-8")

        if isinstance(v, basestring):
            new_params.append((k, v.encode("utf-8")))
        elif isinstance(v, (list, tuple)):
            new_params.append((k, [i.encode("utf-8") for i in v]))
        else:
            new_params.append((k, unicode(v)))

    return urllib.urlencode(new_params, doseq)


def is_same_domain(url1, url2):
    """
    Returns true if the two urls should be treated as if they're from the same
    domain (trusted).
    """
    url1 = urlparse(url1)
    url2 = urlparse(url2)
    return url1.netloc == url2.netloc


def apply_access_control_headers(response, project=None):
    """
    Provides the Access-Control headers to enable cross-site HTTP requests. You
    can find more information about these headers here:
    https://developer.mozilla.org/En/HTTP_access_control#Simple_requests
    """
    origin = settings.ALLOW_ORIGIN or ''
    if project and origin is not '*':
        optval = get_option('sentry:origins', project)
        if optval:
            origin = ('%s %s' % (origin, ' '.join(optval))).strip()

    if origin:
        response['Access-Control-Allow-Origin'] = origin
        response['Access-Control-Allow-Headers'] = 'X-Sentry-Auth, Authentication'
        response['Access-Control-Allow-Methods'] = 'POST'

    return response
| """
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import urllib
from urlparse import urlparse
from sentry.conf import settings
from sentry.plugins.helpers import get_option
def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe version of safe_urlencode

    The stdlib safe_urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fail down to ascii.
    """
    # Snippet originally from pysolr: https://github.com/toastdriven/pysolr
    if hasattr(params, "items"):
        params = params.items()

    new_params = list()

    for k, v in params:
        k = k.encode("utf-8")

        if isinstance(v, basestring):
            new_params.append((k, v.encode("utf-8")))
        elif isinstance(v, (list, tuple)):
            new_params.append((k, [i.encode("utf-8") for i in v]))
        else:
            new_params.append((k, unicode(v)))

    return urllib.urlencode(new_params, doseq)


def is_same_domain(url1, url2):
    """
    Returns true if the two urls should be treated as if they're from the same
    domain (trusted).
    """
    url1 = urlparse(url1)
    url2 = urlparse(url2)
    return url1.netloc == url2.netloc


def apply_access_control_headers(response, project=None):
    """
    Provides the Access-Control headers to enable cross-site HTTP requests. You
    can find more information about these headers here:
    https://developer.mozilla.org/En/HTTP_access_control#Simple_requests
    """
    origin = settings.ALLOW_ORIGIN or ''
    if project and origin is not '*':
        optval = get_option('sentry:origins', project)
        if optval:
            origin = ('%s %s' % (origin, ' '.join(optval))).strip()

    if origin:
        response['Access-Control-Allow-Origin'] = origin
        response['Access-Control-Allow-Headers'] = 'X-Sentry-Auth'
        response['Access-Control-Allow-Methods'] = 'POST'

    return response
| bsd-3-clause | Python |
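The commit's only functional change is whitelisting a second request header; the allowed-origin string itself is assembled by simple concatenation, which the following standalone snippet mirrors (both URLs are placeholders, not real Sentry settings):

```python
origin = 'https://app.example.com'        # stands in for settings.ALLOW_ORIGIN
optval = ['https://other.example.com']    # stands in for the 'sentry:origins' option

origin = ('%s %s' % (origin, ' '.join(optval))).strip()
print(origin)  # https://app.example.com https://other.example.com
```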
553825a2bd5db860d2842a8f72b0142e36d61ba0 | build the very basic web server->app.py | mookaka/mywebblog,mookaka/mywebblog | www/app.py | www/app.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'mookaka'
import logging
import asyncio
from aiohttp import web
logging.basicConfig(level=logging.INFO)
def index(request):
    return web.Response(body=b'<h1>Awesome!</h1>')


async def init(loop):
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    svr = await loop.create_server(app.make_handler(), '127.0.0.1', 9000)
    logging.info('we have run the tiny web server successfully!')
    return svr
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | mit | Python |
|
e9e845b33891f50834e9b8bfb1796e43e9faac81 | Create complete_the_pattern_#9.py | Kunalpod/codewars,Kunalpod/codewars | complete_the_pattern_#9.py | complete_the_pattern_#9.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Complete The Pattern #9 - Diamond
#Problem level: 6 kyu
def pattern(n):
    top = '\n'.join(' '*(n-i) + ''.join(str(j%10) for j in range(1, i+1)) + ''.join(str(j%10) for j in list(range(1,i))[::-1]) + ' '*(n-i) for i in range(1, n+1))
    bottom = '\n'.join(' '*(n-i) + ''.join(str(j%10) for j in range(1, i+1)) + ''.join(str(j%10) for j in list(range(1,i))[::-1]) + ' '*(n-i) for i in list(range(1, n))[::-1])
    return top + '\n' + bottom if bottom else top
| mit | Python |
|
37061643f4e416c8926411229a2e4d2737cef2e5 | Create sportability2shutterfly.py | pythonbag/scripts,pythonbag/scripts | sportability2shutterfly.py | sportability2shutterfly.py | #!/usr/bin/python
#
# convert sportability.com player info to shutterfly.com format.
#
import sys
import csv
def pullContact(list, row, num):
    if row["Parent"+num+"_FirstName"] != "" and row["Parent1_LastName"] != "":
        key = row["Parent"+num+"_FirstName"] + row["Parent1_LastName"]
        if key not in list:
            data = {"FirstName": row["Parent"+num+"_FirstName"],
                    "LastName": row["Parent"+num+"_LastName"],
                    "HomePhone": row["Phone"],
                    "CellPhone": row["Parent"+num+"_Phone"],
                    "Email": row["Parent"+num+"_Email"],
                    "Address": row["Parent"+num+"_Address"],
                    "City": row["Parent"+num+"_City"],
                    "State": row["Parent"+num+"_State"],
                    "Zip": row["Parent"+num+"_Zip"]}
            list[key] = data
    return list


def csv_reader(filename):
    # with open(filename) as f_obj:
    #     reader = csv.DictReader(f_obj, delimiter=',', quotechar='|')
    reader = csv.DictReader(open(filename))
    list = {}
    for row in reader:
        list = pullContact(list, row, "1")
        list = pullContact(list, row, "2")
    print list


if __name__ == "__main__":
    csv_path = "./playersExtended.csv"
    csv_reader(csv_path)
| apache-2.0 | Python |
|
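`pullContact` depends on `csv.DictReader` keying every row by the file's header line, so columns like `Parent1_FirstName` can be addressed by name. A self-contained sketch with fabricated data:

```python
import csv
from io import StringIO

sample = StringIO("Parent1_FirstName,Parent1_LastName,Phone\n"
                  "Pat,Smith,555-0100\n")   # hypothetical export row

for row in csv.DictReader(sample):
    print(row["Parent1_FirstName"], row["Phone"])   # Pat 555-0100
```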
2bc33138e7e110486f98145548b05da65577491c | Fix test set up | edofic/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core | src/tests/ggrc/__init__.py | src/tests/ggrc/__init__.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import os
import logging
from flask.ext.testing import TestCase as BaseTestCase
from ggrc import db
from ggrc.app import app
from ggrc.models import create_db
if os.environ.get('TRAVIS', False):
    db.engine.execute("DROP DATABASE IF EXISTS ggrcdevtest;")
    db.engine.execute("CREATE DATABASE ggrcdevtest; USE ggrcdevtest;")
    create_db(use_migrations=True, quiet=True)

# Hide errors during testing. Errors are still displayed after all tests are
# done. This is for the bad request error messages while testing the api calls.
logging.disable(logging.CRITICAL)


class TestCase(BaseTestCase):

    @classmethod
    def clear_data(cls):
        ignore_tables = (
            "test_model", "roles", "notification_types", "object_types", "options",
            "categories",
        )
        tables = set(db.metadata.tables).difference(ignore_tables)
        for _ in range(len(tables)):
            if len(tables) == 0:
                break  # stop the loop once all tables have been deleted
            for table in reversed(db.metadata.sorted_tables):
                if table.name not in ignore_tables:
                    try:
                        db.engine.execute(table.delete())
                        tables.remove(table.name)
                    except:
                        pass
        db.session.commit()

    def setUp(self):
        # this is a horrible hack because db.metadata.sorted_tables does not sort
        # by dependencies. Events table is before Person table - reversed is bad.
        self.clear_data()
        # if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
        #   from google.appengine.api import memcache
        #   from google.appengine.ext import testbed
        #   self.testbed = testbed.Testbed()
        #   self.testbed.activate()
        #   self.testbed.init_memcache_stub()

    def tearDown(self):
        db.session.remove()
        # if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
        #   from google.appengine.api import memcache
        #   from google.appengine.ext import testbed
        #   self.testbed.deactivate()

    def create_app(self):
        app.config["SERVER_NAME"] = "localhost"
        app.testing = True
        app.debug = False
        return app
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import os
import logging
from flask.ext.testing import TestCase as BaseTestCase
from ggrc import db
from ggrc.app import app
from ggrc.models import create_db
if os.environ.get('TRAVIS', False):
    db.engine.execute("DROP DATABASE IF EXISTS ggrcdevtest;")
    db.engine.execute("CREATE DATABASE ggrcdevtest; USE ggrcdevtest;")
    create_db(use_migrations=True, quiet=True)

# Hide errors during testing. Errors are still displayed after all tests are
# done. This is for the bad request error messages while testing the api calls.
logging.disable(logging.CRITICAL)


class TestCase(BaseTestCase):

    @classmethod
    def clear_data(cls):
        ignore_tables = (
            "test_model", "roles", "notification_types", "object_types", "options"
        )
        tables = set(db.metadata.tables).difference(ignore_tables)
        for _ in range(len(tables)):
            if len(tables) == 0:
                break  # stop the loop once all tables have been deleted
            for table in reversed(db.metadata.sorted_tables):
                if table.name not in ignore_tables:
                    try:
                        db.engine.execute(table.delete())
                        tables.remove(table.name)
                    except:
                        pass
        db.session.commit()

    def setUp(self):
        # this is a horrible hack because db.metadata.sorted_tables does not sort
        # by dependencies. Events table is before Person table - reversed is bad.
        self.clear_data()
        # if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
        #   from google.appengine.api import memcache
        #   from google.appengine.ext import testbed
        #   self.testbed = testbed.Testbed()
        #   self.testbed.activate()
        #   self.testbed.init_memcache_stub()

    def tearDown(self):
        db.session.remove()
        # if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
        #   from google.appengine.api import memcache
        #   from google.appengine.ext import testbed
        #   self.testbed.deactivate()

    def create_app(self):
        app.config["SERVER_NAME"] = "localhost"
        app.testing = True
        app.debug = False
        return app
| apache-2.0 | Python |
8e55abfd68de915c9db75b3385033c97d85b191d | Implement text vectorizer | otknoy/ExTAT,otknoy/ExTAT | nlp/TextVectorizer.py | nlp/TextVectorizer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import utils
class Vectorizer:
    def __init__(self, tokenizer, texts):
        self.tokenizer = tokenizer
        self.texts = texts


class TfVectorizer(Vectorizer):
    def vectorize(self):
        docs = [self.tokenizer(t.encode('utf-8')) for t in self.texts]
        return [utils.term_frequency(d, normalize=False) for d in docs]


class TfidfVectorizer(Vectorizer):
    def vectorize(self):
        docs = [self.tokenizer(t.encode('utf-8')) for t in self.texts]
        return utils.tf_idf(docs, normalize=True)


if __name__ == '__main__':
    texts = [u'Emacs(イーマックス)とは高機能でカスタマイズ性の高いテキストエディタである。',
             u'vi(ヴィーアイ)は、Emacsと共にUNIX環境で人気があるテキストエディタ。',
             u'nanoは、UNIXを中心としたシステムで使われる、cursesを使ったテキストエディタの一種である。']

    import tokenizer

    print 'TF'
    v = TfVectorizer(tokenizer=tokenizer.tokenizeJp, texts=texts)
    tf_list = v.vectorize()
    for i in range(len(tf_list)):
        tf = tf_list[i]
        ranking = sorted(tf.items(), key=lambda x:-x[1])[:10]
        print "[%d]" % (i+1), ', '.join(map(lambda e: "%s: %.2f" % e, ranking))
    print

    print 'TFIDF'
    v = TfidfVectorizer(tokenizer=tokenizer.tokenizeJp, texts=texts)
    tfidf_list = v.vectorize()
    for i in range(len(tfidf_list)):
        tfidf = tfidf_list[i]
        ranking = sorted(tfidf.items(), key=lambda x:-x[1])[:10]
        print "[%d]" % (i+1), ', '.join(map(lambda e: "%s: %.2f" % e, ranking))
| mit | Python |
|
b08cbbf353da0e84f0f1a160de5e79d5d05c0ea6 | add gensim utils | totalgood/nlpia,totalgood/nlpia,totalgood/nlpia | nlpia/gensim_utils.py | nlpia/gensim_utils.py | from __future__ import print_function, unicode_literals, division, absolute_import
from future import standard_library
standard_library.install_aliases() # noqa
from builtins import object # noqa
# from gensim.models import Word2Vec
from gensim import corpora
from gensim import utils
from nlpia.constants import logging
logger = logging.getLogger(__name__)
def tokens2ngrams(tokens, n=2):
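    """Join consecutive tokens into space-separated n-grams, e.g. ['a', 'b', 'c'] -> ['a b', 'b c'] for n=2."""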
tokens = list(tokens)
ngrams = []
for i in range(len(tokens) - n + 1):
ngrams.append(' '.join(tokens[i:i + n]))
return ngrams
def passthrough(*args, **kwargs):
return args[0] if len(args) else list(kwargs.values())[0]
def return_false(*args, **kwargs):
return False
def return_true(*args, **kwargs):
return True
def noop(*args, **kwargs):
pass
def return_none(*args, **kwargs):
pass
class TweetCorpus(corpora.TextCorpus):
ignore_matcher = return_none # compiled regular expression for token matches to skip/ignore
num_grams = 2
case_normalizer = utils.to_unicode
tokenizer = str.split
mask = None
def get_texts(self):
""" Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens """
with self.getstream() as text_stream:
for i, line in enumerate(text_stream):
if self.mask is not None and not self.mask[i]:
continue
ngrams = []
for ng in tokens2ngrams(self.tokenizer(self.case_normalizer(line))):
if self.ignore_matcher(ng):
continue
ngrams += [ng]
yield ngrams
def __len__(self):
""" Enables `len(corpus)` """
if 'length' not in self.__dict__:
logger.info("Computing the number of lines in the corpus size (calculating number of documents)")
            self.length = sum(1 for doc in self.getstream())  # getstream(), matching SMSCorpus below; get_stream() does not exist
return self.length
class SMSCorpus(corpora.TextCorpus):
ignore_matcher = return_none # compiled regular expression for token matches to skip/ignore
num_grams = 2
case_normalizer = utils.to_unicode
tokenizer = str.split
mask = None
def get_texts(self):
""" Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens """
with self.getstream() as text_stream:
for i, line in enumerate(text_stream):
if self.mask is not None and not self.mask[i]:
continue
ngrams = []
for ng in tokens2ngrams(self.tokenizer(self.case_normalizer(line))):
if self.ignore_matcher(ng):
continue
ngrams += [ng]
yield ngrams
def __len__(self):
""" Enables `len(corpus)` """
if 'length' not in self.__dict__:
logger.info("Computing the number of lines in the corpus size (calculating number of documents)")
self.length = sum(1 for doc in self.getstream())
return self.length
| mit | Python |
|
5d28840117020821d5f8e0ea6c3214d1180808a0 | Add code I wrote on the train as a starting point | Bemmu/DecisionTree | decisiontree.py | decisiontree.py | S = [
{'Hearing loss':'No', 'Injury':'No', 'Frequency of vertigo attacks':'0', 'classes' : {'not-BPV':3, 'BPV':0}},
{'Hearing loss':'No', 'Injury':'No', 'Frequency of vertigo attacks':'1', 'classes' : {'not-BPV':59, 'BPV':0}},
{'Hearing loss':'No', 'Injury':'No', 'Frequency of vertigo attacks':'2', 'classes' : {'not-BPV':1, 'BPV':55}},
{'Hearing loss':'No', 'Injury':'Yes', 'Frequency of vertigo attacks':'2', 'classes' : {'not-BPV':21, 'BPV':1}},
{'Hearing loss':'Yes', 'Injury':'No', 'Frequency of vertigo attacks':'0', 'classes' : {'not-BPV':63, 'BPV':0}},
{'Hearing loss':'Yes', 'Injury':'No', 'Frequency of vertigo attacks':'1', 'classes' : {'not-BPV':28, 'BPV':0}},
{'Hearing loss':'Yes', 'Injury':'No', 'Frequency of vertigo attacks':'2', 'classes' : {'not-BPV':234, 'BPV':0}},
{'Hearing loss':'Yes', 'Injury':'Yes', 'Frequency of vertigo attacks':'1', 'classes' : {'not-BPV':1, 'BPV':0}},
{'Hearing loss':'Yes', 'Injury':'Yes', 'Frequency of vertigo attacks':'2', 'classes' : {'not-BPV':30, 'BPV':0}}
]
import math
memo_h = {}
pcalcs = ""
# Expected information needed to classify an arbitrary case in S
def H(S):
if str(S) in memo_h:
return memo_h[str(S)]
s = sum([sum(_case['classes'].values()) for _case in S])
def p(s, _class):
global pcalcs
su = 0
for _case in S:
su += _case['classes'][_class]
line = "p(%s) = %s/%s = %.2f\n" % (_class, su, s, su/float(s))
print line
return su/float(s)
classes = S[0]['classes'].keys()
result = -sum([p(s, Ci)*math.log(p(s, Ci),2) for Ci in classes if p(s, Ci) != 0])
# Now just explain the result
print "H(S) = -(",
for i, Ci in enumerate(classes):
if p(s, Ci) == 0: continue
if i > 0: print "+",
print "%.2f * log2(%.2f)" % (p(s, Ci), p(s, Ci)),
print ") = %.2f" % result
memo_h[str(S)] = result
return result
# OK, so I can compute H(S) now and it even matches my previous calculation.
memo_h_given_aj = {}
# Consider only those cases having the value Aj for attr A
def H_given_Aj(S, A, Aj):
memo_key = (str(S), A, Aj)
if memo_key in memo_h_given_aj:
return memo_h_given_aj[memo_key]
S = [_case for _case in S if _case[A] == Aj]
result = H(S)
memo_h_given_aj[memo_key] = result
print "H_given_Aj %s = %s is %.2f" % (A, Aj, result)
return result
memo = {}
def H_for_attribute(S, A):
if (len(S), A) in memo:
return memo[len(S), A]
s = sum([sum(_case['classes'].values()) for _case in S])
s_for_Aj = lambda Aj: sum([sum(_case['classes'].values()) for _case in S if _case[A] == Aj])
# p(Aj) is the relative frequency of the cases having
# value Aj for the attribute A in the set S
p = lambda Aj:s_for_Aj(Aj) / float(s)
# All the different values attribute A could have
vals = set([_case[A] for _case in S])
# Pre-memoize
for Aj in vals:
p(Aj)*H_given_Aj(S, A, Aj)
print "H_for_attribute %s = sum(" % A,
result = 0
for i, Aj in enumerate(vals):
if i > 0: print "+",
print "%.2f * %.2f" % (p(Aj), H_given_Aj(S, A, Aj)),
part = p(Aj)*H_given_Aj(S, A, Aj)
result += part
print ")"
memo[len(S), A] = result
return result
def I(S, A):
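    # Information gain of attribute A: H(S) minus the expected entropy after partitioning S on A's values.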
print "I(C|%s) = %.2f - %.2f = %.2f" % (A, H(S), H_for_attribute(S, A), H(S) - H_for_attribute(S, A))
return H(S) - H_for_attribute(S, A)
print H(S)
for attr in [key for key in S[0].keys() if key != 'classes']:
print I(S, attr)
print pcalcs | mit | Python |
|
01c0518d88d3b1a6919f9841752eb676bed8f68a | Create test.py | jon-mqn/genealogical_record_linkage | scripts/test.py | scripts/test.py | print "hello world"
| unlicense | Python |
|
427d1bce56c2f5ce7f34ec0816e183fb4aee130f | Create ConvLSTMCell.py | carlthome/tensorflow-convlstm-cell | ConvLSTMCell.py | ConvLSTMCell.py | from tensorflow.python.ops.variable_scope import variable_scope, get_variable
from tensorflow.python.ops.init_ops import constant_initializer
from tensorflow.python.ops.math_ops import tanh, sigmoid
from tensorflow.python.ops.rnn_cell import RNNCell, LSTMStateTuple
from tensorflow.python.ops.nn_ops import conv2d, conv3d
from tensorflow.python.ops.array_ops import concat, split, reshape, zeros
class ConvLSTMCell(RNNCell):
"""A LSTM cell with convolutions instead of multiplications.
Reference:
Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning approach for precipitation nowcasting." Advances in Neural Information Processing Systems. 2015.
"""
def __init__(self, filters, height, width, channels, kernel=[3, 3], forget_bias=1.0, activation=tanh):
self._kernel = kernel
self._num_units = filters
self._height = height
self._width = width
self._channels = channels
self._forget_bias = forget_bias
self._activation = activation
@property
def state_size(self):
size = self._height * self._width * self._num_units
return LSTMStateTuple(size, size)
@property
def output_size(self):
return self._height * self._width * self._num_units
def zero_state(self, batch_size, dtype):
shape = [batch_size, self._height * self._width * self._num_units]
memory = zeros(shape, dtype=dtype)
output = zeros(shape, dtype=dtype)
return LSTMStateTuple(memory, output)
def __call__(self, input, state, scope=None):
"""Convolutional long short-term memory cell (ConvLSTM)."""
with variable_scope(scope or 'ConvLSTMCell'):
previous_memory, previous_output = state
with variable_scope('Expand'):
batch_size = int(previous_memory.get_shape()[0])
shape = [batch_size, self._height, self._width, self._num_units]
input = reshape(input, shape)
previous_memory = reshape(previous_memory, shape)
previous_output = reshape(previous_output, shape)
with variable_scope('Convolve'):
x = concat(3, [input, previous_output])
W = get_variable('Weights', self._kernel + [2 * self._num_units, 4 * self._num_units])
b = get_variable('Biases', [4 * self._num_units], initializer=constant_initializer(0.0))
y = conv2d(x, W, [1, 1, 1, 1], 'SAME') + b
input_gate, new_input, forget_gate, output_gate = split(3, 4, y)
with variable_scope('LSTM'):
memory = (previous_memory
* sigmoid(forget_gate + self._forget_bias)
+ sigmoid(input_gate) * self._activation(new_input))
output = self._activation(memory) * sigmoid(output_gate)
with variable_scope('Flatten'):
shape = [-1, self._height * self._width * self._num_units]
output = reshape(output, shape)
memory = reshape(memory, shape)
return output, LSTMStateTuple(memory, output)
def convolve_inputs(inputs, batch_size, height, width, channels, filters):
W = get_variable('Weights', [1, 1, 1] + [channels, filters])
b = get_variable('Biases', [filters], initializer=constant_initializer(0.0))
y = conv3d(inputs, W, [1] * 5, 'SAME') + b
return reshape(y, [batch_size, -1, height * width * filters])
def expand_outputs(outputs, batch_size, height, width, filters):
return reshape(outputs, [batch_size, -1, height, width, filters])
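
# Minimal usage sketch (commented out; assumes the contemporary TF API this file targets,
# where tf.nn.dynamic_rnn accepts any RNNCell, and the shapes below are illustrative):
# cell = ConvLSTMCell(filters=32, height=16, width=16, channels=3)
# inputs = convolve_inputs(frames, batch_size=8, height=16, width=16, channels=3, filters=32)
# outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=frames.dtype)
# frames_out = expand_outputs(outputs, batch_size=8, height=16, width=16, filters=32)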
| mit | Python |
|
ffa1a6711a616582c38ffeeef47df9f6ff272fe2 | Create DAGUtilities.py | mpsommer/Schedulator | DAGUtilities.py | DAGUtilities.py | '''
Created on Apr 6, 2016
@author: Noah Higa
'''
import sys
from Job import Job
def newJob(identity):
aNewJob = Job(identity, 1, 1, 1, 1, 1, 1, 1, 1)
return aNewJob
def printEdges(G):
edges = G.edges()
for edge in edges:
sys.stdout.write('(')
sys.stdout.write(edge[0].__str__())
sys.stdout.write(',')
sys.stdout.write(edge[1].__str__())
sys.stdout.write(') ')
print ' '
def printNodes(G):
nodes = G.nodes()
for node in nodes:
sys.stdout.write(node.__str__())
sys.stdout.write("; ")
print ' '
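
# Minimal usage sketch (assumes a networkx-style DiGraph and that Job implements __str__):
# import networkx as nx
# G = nx.DiGraph()
# G.add_edge(newJob('A'), newJob('B'))
# printNodes(G)
# printEdges(G)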
| mit | Python |
|
569be57e96a97885c4f7d92a3ce524aa47413897 | Create for_loops.py | ucsb-cs8/ucsb-cs8.github.io,ucsb-cs8/ucsb-cs8.github.io,ucsb-cs8/ucsb-cs8.github.io,ucsb-cs8/ucsb-cs8.github.io | _ptopics/for_loops.py | _ptopics/for_loops.py | ---
topic: "for loops"
desc: "for loops in Python, from basic to advanced"
---
# Basic for loop over a list
```python
schools = ["UCSB","UCLA","UCI","Cal Poly"]
for s in schools:
print(s,len(s))
```
Output:
```
UCSB 4
UCLA 4
UCI 3
Cal Poly 8
```
# Basic for loop with counter
```python
>>> for i in range(4):
... print(i)
...
0
1
2
3
>>>
```
# For loop over a list using `range(len(thelist))`
```python
schools = ["UCSB","UCLA","UCI","Cal Poly"]
for i in range(len(schools)):
print(i,schools[i])
```
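Output (each index paired with its school):
```
0 UCSB
1 UCLA
2 UCI
3 Cal Poly
```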
| mit | Python |
|
a235ee9cf64c1161dddbbb0f1bd540d42575e839 | add comprehension sample | yusabana-sandbox/python-practice | comprehension.py | comprehension.py | # -*- coding: utf-8 -*-
# Pythonの内包表記の使い方まとめ - Life with Python
# http://www.lifewithpython.com/2014/09/python-list-comprehension-and-generator-expression-and-dict-comprehension.html
def main():
## リスト内包
list = [number + 1 for number in range(1,5)]
print(list)
print([x * y for x, y in zip([1,2,3], [11,12,13])])
# 条件(値部分で条件を入れる)
print(['タァーん' if x % 3 == 0 else x for x in range(1,10)])
print([('fizzbuzz' if x % 15 == 0 else ('fizz' if x % 3 == 0 else ('buzz' if x % 5 == 0 else x))) for x in range(1, 30)])
# 条件(後置ifで条件を入れる)
print(['タァーん %d' % x for x in range(1,10) if x % 3 == 0 ])
# 多重ループ
print([x + y for x in range(3) for y in [100, 200, 300]])
# ネスト(flattenな配列にする)
print([x for inner_list in [[1, 3], [5], [7, 9]] for x in inner_list])
## ジェネレータ内包(Rubyのイテレータのような)
generator = (x + 1 for x in range(5))
for x in generator:
print(x)
## Set(集合)内包
set = {x for x in range(5) if x % 2 == 0}
print(set)
## ディクショナリ(辞書)
words = 'あいうえおあ'
dict = {letter: words.count(letter) for letter in words}
print(dict)
li = [("C", 1972), ("Java", 1995), ("JavaScript", 1995)]
print({k: v for k, v in li})
if __name__ == '__main__':
main()
| mit | Python |
|
58b5ff7daf0c240ddbb4b83d11b361dcf574ae74 | Add mkaudiocd.py for making Audio CD for dosbox | frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works | mkaudiocd.py | mkaudiocd.py | #!/usr/bin/env python3
import sys, subprocess, os, multiprocessing, magic, tempfile;
files = sys.argv[1:];
tempfiles = [];
mime = magic.open(magic.MIME);
mime.load();
for i in range(len(files)):
mime_type = mime.file(files[i]).split(';')[0];
    if (mime_type in ('audio/mpeg',)):  # tuple, not bare parens: bare parens would make this a substring test on a string
tf = tempfile.NamedTemporaryFile(prefix='%02d-'%(i+1), suffix='.wav', delete=False);
tf.close();
tempfiles.append(tf.name);
plame = subprocess.Popen(['/usr/bin/lame', '--decode', files[i], tf.name]);
if (0 == plame.wait()):
files[i] = tf.name;
mime.close();
OUTPUT_AUDIO='audiocd.wav';
pwav = subprocess.Popen(['/usr/bin/shntool', 'join'] + files + ['-o', 'wav', '-O', 'always']);
pcue = subprocess.Popen(
['/usr/bin/shntool', 'cue']+files,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
);
stdout, stderr = pcue.communicate();
sys.stderr.write(stderr.decode('UTF-8'));
with open('audiocd.cue', 'w') as f:
f.write(stdout.decode('UTF-8').replace('joined.wav', OUTPUT_AUDIO).replace('WAVE', 'BINARY'));
pwav.wait();
os.rename('joined.wav', OUTPUT_AUDIO);
for f in tempfiles:
os.unlink(f);
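# The resulting audiocd.cue/audiocd.wav pair can then be mounted as a CD image by an emulator such as DOSBox.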
| mit | Python |
|
0cabf3c4dae3599e2d1627ff41707cf36b4d2ddd | Load all modules by default | python-acoustics/python-acoustics,antiface/python-acoustics,FRidh/python-acoustics,felipeacsi/python-acoustics,giumas/python-acoustics | acoustics/__init__.py | acoustics/__init__.py | """
Acoustics
=========
The acoustics module...
"""
import acoustics.aio
import acoustics.ambisonics
import acoustics.atmosphere
import acoustics.bands
import acoustics.building
import acoustics.cepstrum
import acoustics.criterion
import acoustics.decibel
import acoustics.descriptors
import acoustics.directivity
import acoustics.doppler
import acoustics.generator
import acoustics.imaging
import acoustics.octave
import acoustics.power
import acoustics.quantity
import acoustics.reflection
import acoustics.room
import acoustics.signal
import acoustics.turbulence
#import acoustics.utils
import acoustics.weighting
from acoustics._signal import Signal
| """
Acoustics
=========
The acoustics module...
"""
import acoustics.ambisonics
import acoustics.utils
import acoustics.octave
import acoustics.doppler
import acoustics.signal
import acoustics.directivity
import acoustics.building
import acoustics.room
import acoustics.standards
import acoustics.cepstrum
from acoustics._signal import Signal
| bsd-3-clause | Python |
68a37792fd7c5a197758aff738a69c7ce08b4a8b | Add manhole module | Heufneutje/txircd,ElementalAlchemist/txircd,DesertBus/txircd | txircd/modules/manhole.py | txircd/modules/manhole.py | from twisted.conch.manhole_tap import makeService
class Spawner(object):
def __init__(self, ircd):
self.manhole = makeService({
'namespace': {'ircd': ircd},
'passwd': 'manhole.passwd',
'telnetPort': None,
'sshPort': '65432'
})
def spawn(self):
self.manhole.startService()
return {}
def cleanup(self):
self.manhole.stopService()
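
# Note: makeService comes from twisted.conch.manhole_tap; 'manhole.passwd' is expected to
# be a passwd-style credentials file of "username:password" lines for the SSH listener.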
| bsd-3-clause | Python |
|
513af3716c596bb67c0f6552824b854b3735858c | Add simple tests for password strength and sensitivity to MINIMUM_ZXCVBN_SCORE setting | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/domain/tests/test_password_strength.py | corehq/apps/domain/tests/test_password_strength.py | from django import forms
from django.test import SimpleTestCase, override_settings
from corehq.apps.domain.forms import clean_password
class PasswordStrengthTest(SimpleTestCase):
@override_settings(MINIMUM_ZXCVBN_SCORE=2)
def test_score_0_password(self):
self.assert_bad_password(PASSWORDS_BY_STRENGTH[0])
@override_settings(MINIMUM_ZXCVBN_SCORE=2)
def test_score_1_password(self):
self.assert_bad_password(PASSWORDS_BY_STRENGTH[1])
@override_settings(MINIMUM_ZXCVBN_SCORE=2)
def test_score_2_password(self):
self.assert_good_password(PASSWORDS_BY_STRENGTH[2])
@override_settings(MINIMUM_ZXCVBN_SCORE=3)
def test_sensitivity_to_minimum_zxcvbn_score_setting(self):
self.assert_bad_password(PASSWORDS_BY_STRENGTH[2])
def assert_good_password(self, password):
self.assertEqual(clean_password(password), password)
def assert_bad_password(self, password):
with self.assertRaises(forms.ValidationError):
clean_password(password)
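
# Example passwords at each zxcvbn strength score (0 = weakest, 4 = strongest).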
PASSWORDS_BY_STRENGTH = {
0: 's3cr3t',
1: 'password7',
2: 'aljfzpo',
3: '1234mna823',
4: ')(^#:LKNVA^',
}
| bsd-3-clause | Python |
|
e4a5761d997bee3eefad80c6b92c4c4a0c3568fd | Create day_14_part_1.py | Korred/advent_of_code_2016 | day_14_part_1.py | day_14_part_1.py | import hashlib, re
salt = "yjdafjpo"
keys = []
i = 0
while len(keys) < 64:
first_key = hashlib.md5((salt + str(i)).encode("utf-8")).hexdigest()
    # look for the first run of three identical characters in the hash
m1 = re.search(r'(.)\1{2,2}', first_key)
if m1:
for j in range(i + 1, i + 1001):
second_key = hashlib.md5((salt + str(j)).encode("utf-8")).hexdigest()
m2 = re.search(r'(.)\1{4,4}', second_key)
if m2:
if m2.group()[:3] == m1.group():
keys.append((i, first_key))
print(i, first_key)
break
i += 1
print("64th key found at index: ", keys[-1][0])
| mit | Python |
|
a2ffd8ea0b2b1b7ace6ed5b37dd76d9dd9063d25 | Add utility function. | abilian/abilian-core,abilian/abilian-core,abilian/abilian-core,abilian/abilian-core,abilian/abilian-core | yaka/web/util.py | yaka/web/util.py |
def get_object_or_404(cls, *args):
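    """Return the first instance of cls matching the given filter criteria, or abort with a 404."""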
return cls.query.filter(*args).first_or_404()
| lgpl-2.1 | Python |
|
24a19e957a1827d6c5a1d34a68d8fcde8581f588 | Add first pass of slice evaluation script | HazyResearch/metal,HazyResearch/metal | metal/mmtl/eval_slices.py | metal/mmtl/eval_slices.py | """
Example script:
python eval_slices.py --tasks COLA --model_dir /dfs/scratch0/vschen/metal-mmtl/metal/mmtl/aws/output/2019_03_06_03_26_06/0/logdir/search_large_lr/QNLI.STSB.MRPC.QQP.WNLI.RTE.MNLI.SST2.COLA_11_43_53 --slices locs_orgs,proper_nouns
"""
import argparse
import json
import os
from collections import defaultdict
import numpy as np
import torch
from metal.mmtl.debugging.tagger import Tagger
from metal.mmtl.glue_tasks import create_tasks_and_payloads
from metal.mmtl.metal_model import MetalModel
def get_task_config(model_dir):
with open(os.path.join(args.model_dir, "task_config.json")) as f:
task_config = json.load(f)
return task_config
def get_slice_metrics(task, Y, Y_probs, Y_preds, mask=None):
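    """Score predictions restricted to mask (all examples when mask is None) and report how many examples were scored."""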
if mask is None:
mask = np.ones(len(Y)).astype(bool)
return {
"score": task.scorer.score(Y[mask], Y_probs[mask], Y_preds[mask]),
"num_examples": np.sum(mask),
}
def eval_on_slices(model_dir, task_names, slice_names):
# initialize tasks/payloads with previous task_config
task_config = get_task_config(model_dir)
bert_model = task_config["bert_model"]
max_len = task_config["max_len"]
dl_kwargs = {"shuffle": False}
tasks, payloads = create_tasks_and_payloads(
task_names=task_names,
bert_model=bert_model,
max_len=max_len,
dl_kwargs=dl_kwargs,
splits=[args.split],
max_datapoints=-1,
generate_uids=True, # NOTE: this must be True to match with slice_uids!
)
# initialize model with previous weights
try:
pickled_model = os.path.join(model_dir, "model.pkl")
model = torch.load(pickled_model)
except FileNotFoundError:
# model.pkl not found, load weights instead
model = MetalModel(tasks, verbose=False, device=0)
model_path = os.path.join(model_dir, "best_model.pth")
model.load_weights(model_path)
# match uids for slices and evaluate
tagger = Tagger(verbose=False)
slice_scores = defaultdict(dict)
for task, payload in zip(tasks, payloads):
payload_uids = payload.data_loader.dataset.uids
Ys, Ys_probs, Ys_preds = model.predict_with_gold(
payload, [task.name], return_preds=True
)
Y = np.array(Ys[task.name])
Y_probs = np.array(Ys_probs[task.name])
Y_preds = np.array(Ys_preds[task.name])
assert len(payload_uids) == len(Y) == len(Y_probs)
# compute overall scores for task
slice_scores[task.name].update(
{"overall": get_slice_metrics(task, Y, Y_probs, Y_preds)}
)
# compute slice-specific scores
for slice_name in slices_to_evaluate:
# mask uids in slice
slice_uids = tagger.get_uids(slice_name)
mask = [uid in slice_uids for uid in payload_uids]
mask = np.array(mask, dtype=bool)
print(
f"Found {np.sum(mask)}/{len(slice_uids)} "
f"{slice_name} uids in {payload.name}"
)
slice_scores[task.name].update(
{slice_name: get_slice_metrics(task, Y, Y_probs, Y_preds, mask)}
)
return dict(slice_scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--tasks", required=True, type=str, help="Comma-sep task list e.g. QNLI,QQP"
)
parser.add_argument(
"--slices", required=True, type=str, help="Comma-sep list of slices to evaluate"
)
parser.add_argument(
"--model_dir",
required=True,
type=str,
help="directory where *_config.json and <model>.pth are stored",
)
parser.add_argument(
"--split",
type=str,
choices=["train", "valid", "test"],
default="valid",
help="split to evaluate",
)
args = parser.parse_args()
task_names = [task_name for task_name in args.tasks.split(",")]
slices_to_evaluate = [slice_name for slice_name in args.slices.split(",")]
slice_scores = eval_on_slices(args.model_dir, task_names, slices_to_evaluate)
print(slice_scores)
| apache-2.0 | Python |
|
cdc311adcd05e5292f61bf5718ba68dceb4121c3 | install requirements | sanjoydesk/FrameworkBenchmarks,zapov/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,khellang/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,torhve/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,dmacd/FB-try1,k-r-g/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,methane/FrameworkBenchmarks,sxend/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,testn/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,leafo/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Verber/FrameworkBenchmarks,denkab/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,valyala/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,grob/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sgml/FrameworkBenchmarks,herloct/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jamming/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,doom369/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,zloster/FrameworkBenchmarks,actframework/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks | tornado/setup.py | tornado/setup.py | import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
cwd = "%s/FrameworkBenchmarks/tornado" % home
def start(args):
setup_util.replace_text(
cwd + "/server.py", "127.0.0.1", args.database_host)
subprocess.check_call("pip install -r %s/requirements.txt")
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8000" % home, shell=True, cwd=cwd)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8001" % home, shell=True, cwd=cwd)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8002" % home, shell=True, cwd=cwd)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8003" % home, shell=True, cwd=cwd)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/php/deploy/nginx.conf", shell=True)
return 0
def stop():
try:
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True)
except subprocess.CalledProcessError:
#TODO: Better handle exception.
pass
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'server.py' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
| import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
cwd = "%s/FrameworkBenchmarks/tornado" % home
def start(args):
setup_util.replace_text(
cwd + "/server.py", "127.0.0.1", args.database_host)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8000" % home, shell=True, cwd=cwd)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8001" % home, shell=True, cwd=cwd)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8002" % home, shell=True, cwd=cwd)
subprocess.Popen("python %s/FrameworkBenchmarks/tornado/server.py --port=8003" % home, shell=True, cwd=cwd)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/php/deploy/nginx.conf", shell=True)
return 0
def stop():
try:
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True)
except subprocess.CalledProcessError:
#TODO: Better handle exception.
pass
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'server.py' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
| bsd-3-clause | Python |
fd6fe20c74463d88b957d22d0f9f2f0316a489cf | add energy_future_csv | paul-rs/amaas-core-sdk-python,nedlowe/amaas-core-sdk-python,amaas-fintech/amaas-core-sdk-python,amaas-fintech/amaas-core-sdk-python,paul-rs/amaas-core-sdk-python,nedlowe/amaas-core-sdk-python | amaascore/csv_upload/assets/energy_future.py | amaascore/csv_upload/assets/energy_future.py | import logging.config
import csv
from amaascore.tools.csv_tools import csv_stream_to_objects
from amaascore.assets.energy_future import EnergyFuture
from amaascore.assets.interface import AssetsInterface
from amaasutils.logging_utils import DEFAULT_LOGGING
class EnergyFutureUploader(object):
def __init__(self):
pass
@staticmethod
def json_handler(orderedDict, params):
Dict = dict(orderedDict)
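# merge the caller-supplied context (asset_manager_id, client_id) into the parsed CSV row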
for key, var in params.items():
Dict[key] = var
asset_id = Dict.pop('asset_id', None)
asset_status = 'Active'
energy_future = EnergyFuture(asset_id=asset_id, asset_status=asset_status, **dict(Dict))
return energy_future
@staticmethod
def upload(asset_manager_id, client_id, csvpath):
"""convert csv file rows to objects and insert;
asset_manager_id and client_id from the UI (login)"""
interface = AssetsInterface()
logging.config.dictConfig(DEFAULT_LOGGING)
logger = logging.getLogger(__name__)
params = {'asset_manager_id': asset_manager_id, 'client_id': client_id}
with open(csvpath) as csvfile:
energy_futures = csv_stream_to_objects(stream=csvfile, json_handler=EnergyFutureUploader.json_handler, **params)
for energy_future in energy_futures:
interface.new(energy_future)
logger.info('Creating new energy future %s successfully', energy_future.display_name)
@staticmethod
def download(asset_manager_id, asset_id_list):
"""retrieve the assets mainly for test purposes"""
interface = AssetsInterface()
logging.config.dictConfig(DEFAULT_LOGGING)
logger = logging.getLogger(__name__)
energy_futures = []
for asset_id in asset_id_list:
energy_futures.append(interface.retrieve(asset_manager_id=asset_manager_id, asset_id=asset_id))
interface.deactivate(asset_manager_id=asset_manager_id, asset_id=asset_id)
return energy_futures
| apache-2.0 | Python |
|
a68b6c46b7bfe16ecf83cc21398d1746275b03e2 | add a convert tool | wrenchzc/photomanager | convert_olddb.py | convert_olddb.py | import sqlalchemy
import os
from photomanager.db.dbutils import get_db_session
from photomanager.db.models import ImageMeta
def do_convert(db_name):
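# split each stored full path into separate folder and filename columns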
db_session = get_db_session(db_name)
image_metas = db_session.query(ImageMeta)
for meta in image_metas:
filename = meta.filename
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
print(f"{filename} split to {dirname} and {basename}")
meta.folder = dirname
meta.filename = basename
db_session.commit()
do_convert("/home/zhangchi/data/Photos/pmindex.db")
| mit | Python |
|
8f897ea096ca4f7e0ee1a920569d18c8bb4a184d | Create SECURITYHUB_ENABLED.py | awslabs/aws-config-rules,awslabs/aws-config-rules,awslabs/aws-config-rules | python-rdklib/SECURITYHUB_ENABLED/SECURITYHUB_ENABLED.py | python-rdklib/SECURITYHUB_ENABLED/SECURITYHUB_ENABLED.py | """
#####################################
## Gherkin ##
#####################################
Rule Name:
SECURITYHUB_ENABLED
Description:
Checks that AWS Security Hub is enabled for an AWS Account. The rule is NON_COMPLIANT if AWS Security Hub is not enabled.
Rationale:
AWS Security Hub gives you a comprehensive view of your high-priority security alerts, and compliance status across AWS accounts.
Indicative Severity:
Medium
Trigger:
Periodic
Reports on:
AWS::::Account
Rule Parameters:
None
Scenarios:
Scenario: 1
Given: SecurityHub is enabled for an AWS Account.
Then: Return COMPLIANT
Scenario: 2
Given: SecurityHub is not enabled for an AWS Account.
Then: Return NON_COMPLIANT
"""
import botocore
from rdklib import Evaluator, Evaluation, ConfigRule, ComplianceType
APPLICABLE_RESOURCES = ['AWS::::Account']
class SECURITYHUB_ENABLED(ConfigRule):
# Set this to false to prevent unnecessary API calls
delete_old_evaluations_on_scheduled_notification = False
def evaluate_periodic(self, event, client_factory, valid_rule_parameters):
client = client_factory.build_client('securityhub')
evaluations = []
try:
security_hub_enabled = client.describe_hub()
# Scenario:1 SecurityHub is enabled for an AWS Account.
if security_hub_enabled:
evaluations.append(Evaluation(ComplianceType.COMPLIANT, event['accountId'], APPLICABLE_RESOURCES[0]))
except botocore.exceptions.ClientError as error:
# Scenario:2 SecurityHub is not enabled for an AWS Account.
if error.response['Error']['Code'] == 'InvalidAccessException':
evaluations.append(Evaluation(ComplianceType.NON_COMPLIANT, event['accountId'], APPLICABLE_RESOURCES[0]))
else:
raise error
return evaluations
def lambda_handler(event, context):
my_rule = SECURITYHUB_ENABLED()
evaluator = Evaluator(my_rule, APPLICABLE_RESOURCES)
return evaluator.handle(event, context)
| cc0-1.0 | Python |
|
446f5785a5db301de68adffe9e114b3ebafe0b6f | add tests for removing failed jobs | StrellaGroup/frappe,saurabh6790/frappe,yashodhank/frappe,yashodhank/frappe,mhbu50/frappe,frappe/frappe,mhbu50/frappe,almeidapaulopt/frappe,yashodhank/frappe,frappe/frappe,almeidapaulopt/frappe,almeidapaulopt/frappe,frappe/frappe,mhbu50/frappe,StrellaGroup/frappe,saurabh6790/frappe,saurabh6790/frappe,yashodhank/frappe,StrellaGroup/frappe,almeidapaulopt/frappe,mhbu50/frappe,saurabh6790/frappe | frappe/tests/test_background_jobs.py | frappe/tests/test_background_jobs.py | import unittest
from rq import Queue
import frappe
from frappe.core.page.background_jobs.background_jobs import remove_failed_jobs
from frappe.utils.background_jobs import get_redis_conn
class TestBackgroundJobs(unittest.TestCase):
def test_remove_failed_jobs(self):
frappe.enqueue(method="frappe.tests.test_background_jobs.fail_function")
conn = get_redis_conn()
queues = Queue.all(conn)
for queue in queues:
if queue.name == "default":
fail_registry = queue.failed_job_registry
self.assertGreater(fail_registry.count, 0)
remove_failed_jobs()
for queue in queues:
if queue.name == "default":
fail_registry = queue.failed_job_registry
self.assertEqual(fail_registry.count, 0)
def fail_function():
return 1 / 0
| mit | Python |
|
5ba8c63daee6c0cb8667c916e10fd813d2cc8d88 | Add in the cmd module, this is simple and can be expanded, although the basic bases are covered | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/cmd.py | salt/modules/cmd.py | '''
A module for shelling out
Keep in mind that this module is insecure: it can give whoever has access to
the master root-level execution access on all salt minions
'''
import os
import subprocess
import tempfile
def run(cmd):
'''
Execute the passed command and return the output
'''
return subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
def run_stdout(cmd):
'''
Execute a command, and only return the standard out
'''
return subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
def run_stderr(cmd):
'''
Execute a command and only return the standard error
'''
return subprocess.Popen(cmd,
shell=True,
stderr=subprocess.PIPE).communicate()[1]
def exec_code(lang, code):
'''
Pass in two strings, the first naming the executable language, aka -
python2, python3, ruby, perl, lua, etc. the second string containing
the code you wish to execute. The stdout and stderr will be returned
'''
fd, cfn = tempfile.mkstemp()
os.close(fd)
open(cfn, 'w+').write(code)
return subprocess.Popen(lang + ' ' + cfn,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
| apache-2.0 | Python |
|
e3efa4483f43deb9d2e8515ef3a797c03626f892 | add serializtion tests for pandas (#844) | blaze/distributed,dask/distributed,dask/distributed,blaze/distributed,dask/distributed,mrocklin/distributed,mrocklin/distributed,dask/distributed,mrocklin/distributed | distributed/protocol/tests/test_pandas.py | distributed/protocol/tests/test_pandas.py | from __future__ import print_function, division, absolute_import
from zlib import crc32
import pandas as pd
import pandas.util.testing as tm
import pytest
from dask.dataframe.utils import assert_eq
from distributed.protocol import (serialize, deserialize, decompress, dumps,
loads, to_serialize)
from distributed.protocol.utils import BIG_BYTES_SHARD_SIZE
from distributed.utils import tmpfile
from distributed.utils_test import slow
from distributed.protocol.compression import maybe_compress
dfs = [
pd.DataFrame({}),
pd.DataFrame({'x': [1, 2, 3]}),
pd.DataFrame({'x': [1., 2., 3.]}),
pd.DataFrame({0: [1, 2, 3]}),
pd.DataFrame({'x': [1., 2., 3.], 'y': [4., 5., 6.]}),
pd.DataFrame({'x': [1., 2., 3.]}, index=pd.Index([4, 5, 6], name='bar')),
pd.Series([1., 2., 3.]),
pd.Series([1., 2., 3.], name='foo'),
pd.Series([1., 2., 3.], name='foo',
index=[4, 5, 6]),
pd.Series([1., 2., 3.], name='foo',
index=pd.Index([4, 5, 6], name='bar')),
pd.DataFrame({'x': ['a', 'b', 'c']}),
pd.DataFrame({'x': [b'a', b'b', b'c']}),
pd.DataFrame({'x': pd.Categorical(['a', 'b', 'a'], ordered=True)}),
pd.DataFrame({'x': pd.Categorical(['a', 'b', 'a'], ordered=False)}),
tm.makeCategoricalIndex(),
tm.makeCustomDataframe(5, 3),
tm.makeDataFrame(),
tm.makeDateIndex(),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeObjectSeries(),
tm.makePeriodFrame(),
tm.makeRangeIndex(),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
tm.makeUnicodeIndex(),
]
@pytest.mark.parametrize('df', dfs)
def test_dumps_serialize_pandas(df):
header, frames = serialize(df)
if 'compression' in header:
frames = decompress(header, frames)
df2 = deserialize(header, frames)
assert_eq(df, df2)
| bsd-3-clause | Python |
|
37371ed2c1ad347106a403d47d5679e7224b489e | Add a test for debug commands. | deepmind/pysc2 | pysc2/tests/debug_test.py | pysc2/tests/debug_test.py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the debug commands work."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import units
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import debug_pb2 as sc_debug
from s2clientprotocol import sc2api_pb2 as sc_pb
class DebugTest(absltest.TestCase):
def test_multi_player(self):
run_config = run_configs.get()
map_inst = maps.get("Simple64")
with run_config.start() as controller:
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=map_inst.path, map_data=map_inst.data(run_config)))
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(
type=sc_pb.Computer,
race=sc_common.Terran,
difficulty=sc_pb.VeryEasy)
join = sc_pb.RequestJoinGame(race=sc_common.Terran,
options=sc_pb.InterfaceOptions(raw=True))
controller.create_game(create)
controller.join_game(join)
info = controller.game_info()
map_size = info.start_raw.map_size
controller.step(2)
obs = controller.observe()
def get_marines(obs):
return {u.tag: u for u in obs.observation.raw_data.units
if u.unit_type == units.Terran.Marine}
self.assertEmpty(get_marines(obs))
controller.debug(sc_debug.DebugCommand(
create_unit=sc_debug.DebugCreateUnit(
unit_type=units.Terran.Marine,
owner=1,
pos=sc_common.Point2D(x=map_size.x // 2, y=map_size.y // 2),
quantity=5)))
controller.step(2)
obs = controller.observe()
marines = get_marines(obs)
self.assertEqual(5, len(marines))
tags = sorted(marines.keys())
controller.debug([
sc_debug.DebugCommand(kill_unit=sc_debug.DebugKillUnit(
tag=[tags[0]])),
sc_debug.DebugCommand(unit_value=sc_debug.DebugSetUnitValue(
unit_value=sc_debug.DebugSetUnitValue.Life, value=5,
unit_tag=tags[1])),
])
controller.step(2)
obs = controller.observe()
marines = get_marines(obs)
self.assertEqual(4, len(marines))
self.assertNotIn(tags[0], marines)
self.assertEqual(marines[tags[1]].health, 5)
if __name__ == "__main__":
absltest.main()
| apache-2.0 | Python |
|
9d94f581b803e5050f0bf76436cb97d92184b4fb | add tests for country model | USStateDept/FPA_Core,USStateDept/FPA_Core,nathanhilbert/FPA_Core,nathanhilbert/FPA_Core,USStateDept/FPA_Core,nathanhilbert/FPA_Core | openspending/tests/model/test_country.py | openspending/tests/model/test_country.py | import json
import urllib2
from flask import url_for, current_app
from openspending.core import db
from openspending.model.country import Country
from openspending.tests.base import ControllerTestCase
from openspending.command.geometry import create as createcountries
class TestCountryModel(ControllerTestCase):
def setUp(self):
super(TestCountryModel, self).setUp()
createcountries()
def tearDown(self):
pass
def test_all_countries(self):
result = Country.get_all_json()
assert len(result['data']) == 249
assert len(result['data'][0]['regions']) == 8
def test_properties_regions(self):
tempobj = Country.by_gid(1)
assert len(tempobj.regions.keys()) == 10
assert tempobj.label == "Aruba"
def test_sovereignty(self):
tempobj = Country.by_gid(1)
assert tempobj.sovereignty == "Netherlands"
| agpl-3.0 | Python |
|
97d6cce2a5c0c905f0c33c41316c8e65eaed0e08 | Update way we synchronize from citybik.es | wlach/nixi,wlach/nixi | update-bikestations.py | update-bikestations.py | #!/usr/bin/env python
from multiprocessing.pool import ThreadPool
import requests
import json
baseurl = 'http://api.citybik.es/v2/networks/'
networkids = [ 'bixi-montreal', 'bixi-toronto', 'capital-bixi', 'hubway',
'capital-bikeshare', 'citi-bike-nyc', 'barclays-cycle-hire' ]
def process_network(networkid):
r = requests.get(baseurl + networkid)
network = r.json()['network']
# output just the stations that are installed, only the metadata we care
# about
output_stations = []
for station in network['stations']:
# some networks list "uninstalled" stations. don't want those
if station['extra'].get('installed', True):
output_stations.append({'id': station['id'],
'name': station['name'],
'freeBikes': station['free_bikes'],
'emptySlots': station['empty_slots'],
'latitude': station['latitude'],
'longitude': station['longitude']})
open('%s.json' % networkid, 'w').write(json.dumps(output_stations))
return network['location']
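# fetch all networks concurrently; map() keeps results in networkids order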
pool = ThreadPool()
locations = pool.map(process_network, networkids)
with open('locations.js', 'w') as f:
f.write('var networks = {')
for (i, networkid) in enumerate(networkids):
location = locations[i]
f.write('"%s": { name: "%s", latitude: %s, longitude: %s },' % (
networkid, location['city'], location['latitude'],
location['longitude']))
f.write('};')
| mit | Python |
|
a73a6215abd74c9e3bddc0b3841bb8bb1705f250 | Add pyunit based test cases. See #1413 | moyogo/fontbakery,graphicore/fontbakery,googlefonts/fontbakery,googlefonts/fontbakery,graphicore/fontbakery,moyogo/fontbakery,googlefonts/fontbakery,moyogo/fontbakery,graphicore/fontbakery | Lib/fontbakery/specifications/googlefonts_test.py | Lib/fontbakery/specifications/googlefonts_test.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
import pytest
from fontbakery.testrunner import (
INFO
, WARN
, ERROR
, SKIP
, PASS
, FAIL
, ENDTEST
, Section
)
from fontTools.ttLib import TTFont
@pytest.fixture
def font_1():
# FIXME: find absolute path via the path of this module
path = 'data/test/cabin/Cabin-Regular.ttf'
# return TTFont(path)
return path
def change_name_table_id(ttFont, nameID, newEntryString, platEncID=0):
for i, nameRecord in enumerate(ttFont['name'].names):
if nameRecord.nameID == nameID and nameRecord.platEncID == platEncID:
nameRecord.string = newEntryString
def delete_name_table_id(ttFont, nameID):
delete = []
for i, nameRecord in enumerate(ttFont['name'].names):
if nameRecord.nameID == nameID:
delete.append(i)
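# remove from the highest index down so earlier indices stay valid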
for i in sorted(delete, reverse=True):
del(ttFont['name'].names[i])
def test_id_029(font_1):
""" This test is run via the testRunner and demonstrate how to get
(mutable) objects from the conditions cache and change them.
NOTE: the actual fontbakery tests of conditions should never change
a condition object.
"""
from fontbakery.testrunner import TestRunner
from fontbakery.specifications.googlefonts import specification
from fontbakery.constants import NAMEID_LICENSE_DESCRIPTION
values = dict(fonts=[font_1])
runner = TestRunner(specification, values, explicit_tests=['com.google.fonts/test/029'])
print('Test PASS ...')
# run
for status, message, (section, test, iterargs) in runner.run():
if status == ENDTEST:
assert message == PASS
break
# we could also reuse the `iterargs` that was assigned in the previous
# for loop, but this here is more explicit
iterargs = ((u'font', 0),)
ttFont = runner.get('ttFont', iterargs)
print('Test failing entry ...')
# prepare
change_name_table_id(ttFont, NAMEID_LICENSE_DESCRIPTION, 'failing entry')
# run
for status, message, (section, test, iterargs) in runner.run():
print(status, 'message', message)
if status == ENDTEST:
assert message == FAIL
break
print('Test missing entry ...')
# prepare
delete_name_table_id(ttFont, NAMEID_LICENSE_DESCRIPTION)
# run
for status, message, (section, test, iterargs) in runner.run():
if status == ENDTEST:
assert message == FAIL
break
def test_id_029_shorter(font_1):
""" This is much more direct, as it calls the test directly.
However, since these tests are often generators (using yield)
we still need to get the last (in this case) iteration value,
using `list(generator)[-1]` here.
"""
from fontbakery.specifications.googlefonts import \
check_copyright_entries_match_license
from fontbakery.constants import NAMEID_LICENSE_DESCRIPTION
ttFont = TTFont(font_1)
license = 'OFL.txt'
print('Test PASS ...')
# run
status, message = list(check_copyright_entries_match_license(ttFont, license))[-1]
assert status == PASS
print('Test failing entry ...')
# prepare
change_name_table_id(ttFont, NAMEID_LICENSE_DESCRIPTION, 'failing')
# run
status, message = list(check_copyright_entries_match_license(ttFont, license))[-1]
assert status == FAIL
print('Test missing entry ...')
# prepare
delete_name_table_id(ttFont, NAMEID_LICENSE_DESCRIPTION)
# run
status, message = list(check_copyright_entries_match_license(ttFont, license))[-1]
assert status == FAIL
| apache-2.0 | Python |
|
064c2b53089611e838934c76d8fba19eaad85e75 | add cot verify test stub | escapewindow/scriptworker,mozilla-releng/scriptworker,escapewindow/scriptworker,mozilla-releng/scriptworker | scriptworker/test/test_cot_verify.py | scriptworker/test/test_cot_verify.py | #!/usr/bin/env python
# coding=utf-8
"""Test scriptworker.cot.verify
"""
import logging
import pytest
from scriptworker.exceptions import CoTError
import scriptworker.cot.verify as verify
from . import rw_context
assert rw_context # silence pyflakes
# TODO remove once we use
assert CoTError, verify
assert pytest
log = logging.getLogger(__name__)
# constants helpers and fixtures {{{1
@pytest.yield_fixture(scope='function')
def chain_of_trust(rw_context):
pass
| mpl-2.0 | Python |
|
9376ab25e4b5713b8b354a3a03c37b1e356fa5c2 | Create unlock-device.py | Timothee38/pythonScriptsAVC | unlock-device.py | unlock-device.py | from com.dtmilano.android.viewclient import ViewClient
device, serial = ViewClient.connectToDeviceOrExit()
if device.checkConnected():
print("Device connected - serial: {}".format(serial))
print("Device is going to be unlocked...")
device.wake()
device.unlock()
else:
print("Device is not connected!")
| mit | Python |
|
47b93bee1ebcf5fcf6ea2ff3ad7eaabb831f692c | Add WATERS_Utils folder | Harefoot/TurboQUALTX | ET_Utils/WATERS_Utils/__init__.py | ET_Utils/WATERS_Utils/__init__.py | mit | Python |
||
60f76f01c6961f6aceb3b67643057798aed056c7 | Add python script for validating version files on vaadin.com | travisfw/vaadin,mstahv/framework,magi42/vaadin,Darsstar/framework,Legioth/vaadin,asashour/framework,oalles/vaadin,jdahlstrom/vaadin.react,travisfw/vaadin,Legioth/vaadin,udayinfy/vaadin,fireflyc/vaadin,Peppe/vaadin,fireflyc/vaadin,Scarlethue/vaadin,mstahv/framework,synes/vaadin,magi42/vaadin,magi42/vaadin,sitexa/vaadin,shahrzadmn/vaadin,Peppe/vaadin,shahrzadmn/vaadin,Darsstar/framework,peterl1084/framework,travisfw/vaadin,asashour/framework,oalles/vaadin,asashour/framework,udayinfy/vaadin,fireflyc/vaadin,synes/vaadin,Darsstar/framework,Scarlethue/vaadin,mstahv/framework,jdahlstrom/vaadin.react,Scarlethue/vaadin,Peppe/vaadin,shahrzadmn/vaadin,fireflyc/vaadin,magi42/vaadin,magi42/vaadin,jdahlstrom/vaadin.react,Legioth/vaadin,asashour/framework,Legioth/vaadin,udayinfy/vaadin,peterl1084/framework,Darsstar/framework,kironapublic/vaadin,kironapublic/vaadin,peterl1084/framework,Peppe/vaadin,sitexa/vaadin,oalles/vaadin,travisfw/vaadin,Legioth/vaadin,mstahv/framework,synes/vaadin,Scarlethue/vaadin,udayinfy/vaadin,shahrzadmn/vaadin,udayinfy/vaadin,peterl1084/framework,oalles/vaadin,sitexa/vaadin,peterl1084/framework,Peppe/vaadin,synes/vaadin,sitexa/vaadin,travisfw/vaadin,fireflyc/vaadin,kironapublic/vaadin,oalles/vaadin,synes/vaadin,sitexa/vaadin,jdahlstrom/vaadin.react,Scarlethue/vaadin,kironapublic/vaadin,Darsstar/framework,shahrzadmn/vaadin,mstahv/framework,kironapublic/vaadin,asashour/framework,jdahlstrom/vaadin.react | scripts/ValidateVaadinDownload.py | scripts/ValidateVaadinDownload.py | #coding=UTF-8
import argparse, sys
from urllib.request import urlopen
parse = argparse.ArgumentParser(description="Check vaadin.com version lists")
parse.add_argument("version", help="Released Vaadin version number")
args = parse.parse_args()
if hasattr(args, "echo"):
print(args.echo)
sys.exit(1)
prerelease = None
(major, minor, maintenance) = args.version.split(".", 2)
if "." in maintenance:
(maintenance, prerelease) = maintenance.split(".", 1)
# Version without prerelease tag
version = "%s.%s.%s" % (major, minor, maintenance)
isPrerelease = prerelease is not None
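# a prerelease must not overwrite the stable LATEST pointers; a final release must (hence the XOR in the asserts below)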
failed = False
vaadin7Latest = "http://vaadin.com/download/LATEST7"
vaadin7Versions = "http://vaadin.com/download/VERSIONS_7"
vaadin6Latest = "http://vaadin.com/download/LATEST"
vaadinPrerelease = "http://vaadin.com/download/PRERELEASES"
try:
latest = urlopen(vaadin7Latest).read().decode().split("\n")
releaseRow = "release/%s.%s/%s" % (major, minor, version)
assert (version in latest[0]) ^ isPrerelease, "Latest version mismatch. %s: %s, was: %s" % ("should not be" if isPrerelease else "should be", args.version, latest[0])
assert (releaseRow in latest[1]) ^ isPrerelease, "Release row mismatch; %s: %s, was %s" % ("should not be" if isPrerelease else "should be", releaseRow, latest[1])
except Exception as e:
failed = True
print("Latest version was not correctly updated: %s" % (e))
try:
assert "%s," % (args.version) in urlopen(vaadin7Versions).read().decode().split("\n"), "Released version not in version list"
except Exception as e:
if isPrerelease:
print("Prerelease version needs to be added manually to versions!")
else:
failed = True
print(e)
try:
latest = urlopen(vaadin6Latest).read().decode().split("\n")
releaseRow = "release/6.8/6.8."
assert ("6.8." in latest[0]), "Latest version mismatch; should be: %sX, was: %s" % ("6.8.", latest[0])
assert (releaseRow in latest[1]), "Release row mismatch; should be: %sX, was %s" % (releaseRow, latest[1])
except Exception as e:
failed = True
print("Latest Vaadin 6 version was updated by release. %s" % (e))
try:
latest = urlopen(vaadinPrerelease).read().decode().split("\n")
assert (args.version in latest[0]) or not isPrerelease, "%s: %s, was: %s" % ("should be", args.version, latest[0])
except Exception as e:
print("Prerelease file was not correctly updated: %s" % (e))
sys.exit(1 if failed else 0)
| apache-2.0 | Python |
|
665b3372e089fda3dde104b0754efa65a87a9bd2 | Test harness for checking wtf the HTTPClientResponseHandler is actually doing with data from the network | sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia | Sketches/MPS/Bookmarks/TestHTTPResponseHandler.py | Sketches/MPS/Bookmarks/TestHTTPResponseHandler.py | #!/usr/bin/python
import base64
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Chassis.Pipeline import Pipeline
from TwitterStream import HTTPClientResponseHandler
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Util.Console import ConsoleEchoer
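# replay base64-captured wire data through the response handler and save the decoded body for inspection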
Pipeline(
ReadFileAdaptor("tweets.b64.txt", readmode="line"),
PureTransformer(base64.b64decode),
HTTPClientResponseHandler(suppress_header = True),
SimpleFileWriter("tweets.b64raw.txt"),
).run()
| apache-2.0 | Python |
|
53467bd7d4c9c12b73c66244a91f31f0dbadeeec | Add pagerteam tests file which had been missed despite its existence | healthchecks/healthchecks,iphoting/healthchecks,iphoting/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks | hc/front/tests/test_add_pagerteam.py | hc/front/tests/test_add_pagerteam.py | from hc.api.models import Channel
from hc.test import BaseTestCase
class AddPagerTeamTestCase(BaseTestCase):
url = "/integrations/add_pagerteam/"
def test_instructions_work(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertContains(r, "PagerTeam")
def test_it_works(self):
form = {"value": "http://example.org"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, form)
self.assertRedirects(r, "/integrations/")
c = Channel.objects.get()
self.assertEqual(c.kind, "pagerteam")
self.assertEqual(c.value, "http://example.org")
self.assertEqual(c.project, self.project)
def test_it_rejects_bad_url(self):
form = {"value": "not an URL"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, form)
self.assertContains(r, "Enter a valid URL")
| bsd-3-clause | Python |
|
9f9916d662d1ab130c9685c415c25b19a14733d7 | Add example to illustrate different optimization procedures | d-mittal/pystruct,massmutual/pystruct,wattlebird/pystruct,massmutual/pystruct,wattlebird/pystruct,pystruct/pystruct,amueller/pystruct,d-mittal/pystruct,pystruct/pystruct,amueller/pystruct | examples/svm_objectives.py | examples/svm_objectives.py | # showing the relation between cutting plane and primal objectives
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from pystruct.problems import CrammerSingerSVMProblem
from pystruct.learners import (StructuredSVM, OneSlackSSVM,
SubgradientStructuredSVM)
# do a binary digit classification
digits = load_digits()
X, y = digits.data, digits.target
X /= X.max()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# we add a constant 1 feature for the bias
X_train_bias = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
pbl = CrammerSingerSVMProblem(n_features=X_train_bias.shape[1], n_classes=10)
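# compare three solvers on the same problem: n-slack / one-slack cutting plane and subgradient descent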
n_slack_svm = StructuredSVM(pbl, verbose=0, check_constraints=False, C=20,
max_iter=500, batch_size=10)
one_slack_svm = OneSlackSSVM(pbl, verbose=0, check_constraints=False, C=20,
max_iter=1000, tol=0.001)
subgradient_svm = SubgradientStructuredSVM(pbl, C=20, learning_rate=0.01,
max_iter=300, decay_exponent=0,
momentum=0, verbose=0)
# n-slack cutting plane ssvm
n_slack_svm.fit(X_train_bias, y_train)
## 1-slack cutting plane ssvm
one_slack_svm.fit(X_train_bias, y_train)
# online subgradient ssvm
subgradient_svm.fit(X_train_bias, y_train)
#plt.plot(n_slack_svm.objective_curve_, label="n-slack lower bound")
plt.plot(n_slack_svm.objective_curve_, label="n-slack lower bound")
plt.plot(one_slack_svm.objective_curve_, label="one-slack lower bound")
plt.plot(one_slack_svm.primal_objective_curve_, label="one-slack primal")
plt.plot(subgradient_svm.objective_curve_, label="subgradient")
plt.legend()
plt.show()
| bsd-2-clause | Python |
|
3a2e9a19feab0c882a9821b7ff555bd1e2693190 | Test effect of change in Laplacian. | charanpald/APGL | exp/sandbox/DeltaLaplacianExp.py | exp/sandbox/DeltaLaplacianExp.py | import numpy
import scipy.sparse
from apgl.graph import GraphUtils
from apgl.util.Util import Util
numpy.set_printoptions(suppress=True, precision=3)
n = 10
W1 = scipy.sparse.rand(n, n, 0.5).todense()
W1 = W1.T.dot(W1)
W2 = W1.copy()
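# perturb a single symmetric pair of entries, i.e. add one edge, to see its effect on the Laplacian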
W2[1, 2] = 1
W2[2, 1] = 1
print("W1="+str(W1))
print("W2="+str(W2))
L1 = GraphUtils.normalisedLaplacianSym(scipy.sparse.csr_matrix(W1))
L2 = GraphUtils.normalisedLaplacianSym(scipy.sparse.csr_matrix(W2))
deltaL = L2 - L1
print("L1="+str(L1.todense()))
print("L2="+str(L2.todense()))
print("deltaL="+str(deltaL.todense()))
print("rank(deltaL)=" + str(Util.rank(deltaL.todense()))) | bsd-3-clause | Python |
|
d716098727293c2c90c97a41c57bab57330c176c | Fix read_only tests. | anushbmx/kitsune,safwanrahman/kitsune,mozilla/kitsune,MikkCZ/kitsune,anushbmx/kitsune,mozilla/kitsune,mozilla/kitsune,MikkCZ/kitsune,mozilla/kitsune,safwanrahman/kitsune,anushbmx/kitsune,safwanrahman/kitsune,MikkCZ/kitsune,MikkCZ/kitsune,safwanrahman/kitsune,anushbmx/kitsune | kitsune/sumo/tests/test_readonly.py | kitsune/sumo/tests/test_readonly.py | import copy
from django.conf import settings
from django.db import models
from django.db.utils import DatabaseError
from django.test import TestCase, override_settings
from nose.tools import assert_raises, eq_
from pyquery import PyQuery as pq
from kitsune.questions.models import Question
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import UserFactory
class ReadOnlyModeTest(TestCase):
extra = ('kitsune.sumo.middleware.ReadOnlyMiddleware',)
def setUp(self):
# This has to be done before the db goes into read only mode.
self.user = UserFactory(password='testpass')
models.signals.pre_save.connect(self.db_error)
models.signals.pre_delete.connect(self.db_error)
self.old_settings = copy.copy(settings._wrapped.__dict__)
def tearDown(self):
models.signals.pre_save.disconnect(self.db_error)
models.signals.pre_delete.disconnect(self.db_error)
def db_error(self, *args, **kwargs):
raise DatabaseError("You can't do this in read-only mode.")
@override_settings(READ_ONLY=True)
def test_db_error(self):
assert_raises(DatabaseError, Question.objects.create, id=12)
@override_settings(READ_ONLY=True)
def test_login_error(self):
# This tries to do a db write.
url = reverse('users.login', locale='en-US')
r = self.client.post(url, {
'username': self.user.username,
'password': 'testpass',
}, follow=True)
eq_(r.status_code, 503)
title = pq(r.content)('title').text()
assert title.startswith('Maintenance in progress'), title
@override_settings(READ_ONLY=True)
def test_bail_on_post(self):
r = self.client.post('/en-US/questions')
eq_(r.status_code, 503)
title = pq(r.content)('title').text()
assert title.startswith('Maintenance in progress'), title
| import copy
from django.conf import settings
from django.db import models
from django.db.utils import DatabaseError
from django.test import TestCase
from django.utils import importlib
from nose.tools import assert_raises, eq_
from pyquery import PyQuery as pq
from kitsune.questions.models import Question
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import UserFactory
class ReadOnlyModeTest(TestCase):
extra = ('kitsune.sumo.middleware.ReadOnlyMiddleware',)
def setUp(self):
# This has to be done before the db goes into read only mode.
self.user = UserFactory(password='testpass')
models.signals.pre_save.connect(self.db_error)
models.signals.pre_delete.connect(self.db_error)
self.old_settings = copy.copy(settings._wrapped.__dict__)
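# flip the live settings into read-only mode and reload the middleware stack so it takes effect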
settings.SLAVE_DATABASES = ['default']
settings_module = importlib.import_module(settings.SETTINGS_MODULE)
settings_module.read_only_mode(settings._wrapped.__dict__)
self.client.handler.load_middleware()
def tearDown(self):
settings._wrapped.__dict__ = self.old_settings
models.signals.pre_save.disconnect(self.db_error)
models.signals.pre_delete.disconnect(self.db_error)
def db_error(self, *args, **kwargs):
raise DatabaseError("You can't do this in read-only mode.")
def test_db_error(self):
assert_raises(DatabaseError, Question.objects.create, id=12)
def test_login_error(self):
# This tries to do a db write.
url = reverse('users.login', locale='en-US')
r = self.client.post(url, {
'username': self.user.username,
'password': 'testpass',
}, follow=True)
eq_(r.status_code, 503)
title = pq(r.content)('title').text()
assert title.startswith('Maintenance in progress'), title
def test_bail_on_post(self):
r = self.client.post('/en-US/questions')
eq_(r.status_code, 503)
title = pq(r.content)('title').text()
assert title.startswith('Maintenance in progress'), title
| bsd-3-clause | Python |
9a22cf7452723686a5065658ce5c9d31333c8a33 | Add download random leader avatar to examples | StepicOrg/Stepic-API | examples/download_random_leader_avatar.py | examples/download_random_leader_avatar.py | # Run with Python 3
import json
import requests
from random import randint
import shutil
import math
# 1. Get your keys at https://stepic.org/oauth2/applications/ (client type = confidential,
# authorization grant type = client credentials)
client_id = "..."
client_secret = "..."
# 2. Get a token
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
resp = requests.post('https://stepic.org/oauth2/token/',
data={'grant_type': 'client_credentials'},
auth=auth
)
token = json.loads(resp.text)['access_token']
# 3. Call API (https://stepic.org/api/docs/) using this token.
# Get leaders by count
def get_leaders(count):
pages = math.ceil(count / 20)
leaders = []
for page in range(1, pages + 1):
api_url = 'https://stepic.org/api/leaders/?page={}'.format(page)
response = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer '+ token}).text)
leaders += response['leaders']
if not response['meta']['has_next']:
break
return leaders
# Get user by id
def get_user(id):
api_url = 'https://stepic.org/api/users/{}/'.format(id)
return json.loads(requests.get(api_url, headers={'Authorization': 'Bearer '+ token}).text)['users'][0]
# Download avatar by user id
def download_avatar(id, filename):
avatar_url = get_user(id)['avatar']
response = requests.get(avatar_url, stream=True)
with open('{}.png'.format(filename), 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
# Get leader user randomly from 100 leaders and download his avatar
rand_leader_id = get_leaders(100)[randint(0, 99)]['user']
download_avatar(rand_leader_id, 'leader')
| mit | Python |
|
e79437b7badcbc7e48e5090e2e27b892c323e829 | add script to make bedgraph from lumpy probbaly (-P) output | hall-lab/lumpy-sv,glebkuznetsov/lumpy-sv,hall-lab/lumpy-sv,cc2qe/lumpy-sv,arq5x/lumpy-sv,cc2qe/lumpy-sv,hall-lab/lumpy-sv,hall-lab/lumpy-sv,arq5x/lumpy-sv,glebkuznetsov/lumpy-sv,glebkuznetsov/lumpy-sv,arq5x/lumpy-sv,cc2qe/lumpy-sv,glebkuznetsov/lumpy-sv,arq5x/lumpy-sv,cc2qe/lumpy-sv,cc2qe/lumpy-sv,hall-lab/lumpy-sv,glebkuznetsov/lumpy-sv,arq5x/lumpy-sv | scripts/prob_bedpe_to_bedgraph.py | scripts/prob_bedpe_to_bedgraph.py | #!/usr/bin/env python
import sys
import numpy as np
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-b",
"--bedpe_file",
dest="bedpe_file",
help="BEDPE file")
parser.add_option("-n",
"--name",
default="LUMPY BedGraph",
dest="name",
help="Name")
(options, args) = parser.parse_args()
if not options.bedpe_file:
parser.error('BEDPE file not given')
f = open(options.bedpe_file,'r')
print 'track type=bedGraph name="' + options.name + '"'
for l in f:
A = l.rstrip().split('\t')
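# columns 12 and 13 (A[11]/A[12]) hold space-separated per-base probabilities for each breakpoint end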
L=[float(x) for x in A[11].split()]
R=[float(x) for x in A[12].split()]
l_chr = A[0]
l_start = int(A[1])
r_chr = A[3]
r_start = int(A[4])
c = 0
for p in L:
print '\t'.join( [l_chr,
str(l_start + c),
str(l_start + c + 1),
str(p)])
c+=1
c = 0
for p in R:
print '\t'.join( [r_chr,
str(r_start + c),
str(r_start + c + 1),
str(p)])
c+=1
f.close()
| mit | Python |
|
18728051374484ca93b59d60a4e6941bdc5c6192 | Add missing migration | HelsinkiHacklab/asylum,rambo/asylum,HelsinkiHacklab/asylum,rambo/asylum,hacklab-fi/asylum,hacklab-fi/asylum,hacklab-fi/asylum,rambo/asylum,hacklab-fi/asylum,HelsinkiHacklab/asylum,rambo/asylum,HelsinkiHacklab/asylum | project/creditor/migrations/0010_auto_20190131_1731.py | project/creditor/migrations/0010_auto_20190131_1731.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('creditor', '0009_auto_20160123_2128'),
]
operations = [
migrations.AlterField(
model_name='recurringtransaction',
name='rtype',
field=models.PositiveSmallIntegerField(verbose_name='Recurrence type', choices=[(1, 'Monthly'), (2, 'Yearly'), (3, 'Quarterly')]),
),
]
| mit | Python |
|
efecaf8cdb7ca4623d2efd53590adf976fd36954 | Add test ref #261. | pypa/setuptools,pypa/setuptools,pypa/setuptools | setuptools/tests/test_build_py.py | setuptools/tests/test_build_py.py | import os
import pytest
from setuptools.dist import Distribution
@pytest.yield_fixture
def tmpdir_as_cwd(tmpdir):
with tmpdir.as_cwd():
yield tmpdir
def test_directories_in_package_data_glob(tmpdir_as_cwd):
"""
Directories matching the glob in package_data should
not be included in the package data.
Regression test for #261.
"""
dist = Distribution(dict(
script_name='setup.py',
script_args=['build_py'],
packages=[''],
name='foo',
package_data={'': ['path/*']},
))
os.makedirs('path/subpath')
#with contexts.quiet():
dist.parse_command_line()
dist.run_commands()
| mit | Python |
|
55aec9effab781a422501d67f18e77efb2eba446 | add sequence_handler.py | pucktada/cutkum | sequence_handler.py | sequence_handler.py | import tensorflow as tf
import numpy as np
def get_num_records(filename):
return len([x for x in tf.python_io.tf_record_iterator(filename)])
# Write all examples into a TFRecords file
def save_tfrecord(writer, sources, targets):
for source, target in zip(sources, targets):
ex = make_example(source, target)
writer.write(ex.SerializeToString())
def make_example(source, target):
'''
Generates a SequenceExample out of a sequence of inputs and outputs
:param source: Sequence input
:param target: Sequence output
:return: SequenceExample
'''
# The object we return
ex = tf.train.SequenceExample()
# A non-sequential feature of our example
sequence_length = len(source)
ex.context.feature["length"].int64_list.value.append(sequence_length)
# Feature lists for the two sequential features of our example
fl_source = ex.feature_lists.feature_list["source"]
fl_target = ex.feature_lists.feature_list["target"]
for src, tar in zip(source, target):
#print(type(token))
fl_source.feature.add().int64_list.value.append(src)
fl_target.feature.add().int64_list.value.append(tar)
return ex
def read_and_decode_single_example(filenames, shuffle=False, num_epochs=None):
# first construct a queue containing a list of filenames.
# this lets a user split up there dataset in multiple files to keep size down
#filename_queue = tf.train.string_input_producer([filename], num_epochs=10)
filename_queue = tf.train.string_input_producer(filenames,
shuffle=shuffle, num_epochs=num_epochs)
reader = tf.TFRecordReader()
# One can read a single serialized example from a filename
# serialized_example is a Tensor of type string.
key, serialized_ex = reader.read(filename_queue)
context, sequences = tf.parse_single_sequence_example(serialized_ex,
context_features = {
"length": tf.FixedLenFeature([], dtype=tf.int64)
},
sequence_features={
# We know the length of both fields. If not, the
# tf.VarLenFeature could be used
"source": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"target": tf.FixedLenSequenceFeature([], dtype=tf.int64)
})
return (key, context, sequences)
def main():
print('sequence_handler:main')
tmp_filename = 'tf.tmp'
writer = tf.python_io.TFRecordWriter(tmp_filename)
xlist = [[1, 2, 3], [1, 2], [3, 2, 1], [5,6,7,8,9,9,8], [7,6,6,7]]
ylist = [[0, 1, 0], [2, 0], [2, 2, 2], [3,1,3,1,3,1,1], [4,1,4,1]]
print(xlist)
print(ylist)
save_tfrecord(writer, xlist, ylist)
writer.close()
graph = tf.Graph()
with graph.as_default():
key, context, features = read_and_decode_single_example([tmp_filename])
tokens = features['source']
labels = features['target']
with tf.Session(graph=graph) as sess:
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
print('\nread back the file...')
for i in range(len(xlist)):
t, l = sess.run([tokens, labels])
print('tokens:', t)
print('labels:', l)
print('')
# stop our queue threads and properly close the session
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main() | mit | Python |
|
2526c91b77b538e1f37bd279783de0ac5452c463 | Add test to validate legislative parameters in XML and JSON format. | antoinearnoud/openfisca-france,benjello/openfisca-france,SophieIPP/openfisca-france,benjello/openfisca-france,adrienpacifico/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france,antoinearnoud/openfisca-france,SophieIPP/openfisca-france,adrienpacifico/openfisca-france | tests/legislation_tests.py | tests/legislation_tests.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import xml.etree.ElementTree
import openfisca_france
openfisca_france.init_country()
from openfisca_core import conv, legislations, legislationsxml, model
def test_legislation_xml_file():
legislation_tree = xml.etree.ElementTree.parse(model.PARAM_FILE)
legislation_xml_json = conv.check(legislationsxml.xml_legislation_to_json)(legislation_tree.getroot(),
state = conv.default_state)
legislation_xml_json, errors = legislationsxml.validate_node_xml_json(legislation_xml_json,
state = conv.default_state)
if errors is not None:
errors = conv.embed_error(legislation_xml_json, 'errors', errors)
if errors is None:
raise ValueError(unicode(json.dumps(legislation_xml_json, ensure_ascii = False,
indent = 2)).encode('utf-8'))
raise ValueError(u'{0} for: {1}'.format(
unicode(json.dumps(errors, ensure_ascii = False, indent = 2, sort_keys = True)),
unicode(json.dumps(legislation_xml_json, ensure_ascii = False, indent = 2)),
).encode('utf-8'))
legislation_json = legislationsxml.transform_node_xml_json_to_json(legislation_xml_json)
legislation_json, errors = legislations.validate_node_json(legislation_json, state = conv.default_state)
if errors is not None:
errors = conv.embed_error(legislation_json, 'errors', errors)
if errors is None:
raise ValueError(unicode(json.dumps(legislation_json, ensure_ascii = False, indent = 2)).encode('utf-8'))
raise ValueError(u'{0} for: {1}'.format(
unicode(json.dumps(errors, ensure_ascii = False, indent = 2, sort_keys = True)),
unicode(json.dumps(legislation_json, ensure_ascii = False, indent = 2)),
).encode('utf-8'))
| agpl-3.0 | Python |
|
5f3af12d40e7c9ff388385e408d65565cb916def | Add Swagger integration test | openfisca/openfisca-web-api,sgmap/openfisca-web-api,openfisca/openfisca-web-api,sgmap/openfisca-web-api | openfisca_web_api/tests/test_swagger_integration.py | openfisca_web_api/tests/test_swagger_integration.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import assert_greater
from ..controllers.swagger import (
build_paths,
)
from . import common
def setup_module(module):
common.get_or_load_app()
def smoke_test_build_paths():
assert_greater(build_paths(), 100)
| agpl-3.0 | Python |
|
0507a47f1c15bac5f6eddbeb9c712f5c2b2a9358 | Add tests for msgpack reader. | ericdill/databroker,ericdill/databroker | intake_bluesky/tests/test_msgpack.py | intake_bluesky/tests/test_msgpack.py | import intake_bluesky.msgpack # noqa
import intake
from suitcase.msgpack import Serializer
import os
import pytest
import shutil
import tempfile
import time
import types
from .generic import * # noqa
TMP_DIR = tempfile.mkdtemp()
TEST_CATALOG_PATH = [TMP_DIR]
YAML_FILENAME = 'intake_msgpack_test_catalog.yml'
def teardown_module(module):
try:
shutil.rmtree(TMP_DIR)
except BaseException:
pass
@pytest.fixture(params=['local', 'remote'])
def bundle(request, intake_server, example_data, tmp_path): # noqa
serializer = Serializer(tmp_path)
uid, docs = example_data
for name, doc in docs:
serializer(name, doc)
serializer.close()
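# point a YAML catalog at the msgpack files we just wrote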
fullname = os.path.join(TMP_DIR, YAML_FILENAME)
with open(fullname, 'w') as f:
f.write(f'''
plugins:
source:
- module: intake_bluesky
sources:
xyz:
description: Some imaginary beamline
driver: intake_bluesky.msgpack.BlueskyMsgpackCatalog
container: catalog
args:
paths: {[str(path) for path in serializer.artifacts['all']]}
handler_registry:
NPY_SEQ: ophyd.sim.NumpySeqHandler
metadata:
beamline: "00-ID"
''')
time.sleep(2)
if request.param == 'local':
cat = intake.Catalog(os.path.join(TMP_DIR, YAML_FILENAME))
elif request.param == 'remote':
cat = intake.Catalog(intake_server, page_size=10)
else:
raise ValueError
return types.SimpleNamespace(cat=cat,
uid=uid,
docs=docs)
| bsd-3-clause | Python |
|
2e1d70391b26ae353ca95ce25a08d59f1d8f9f9e | Create multifilebuilder_gtk3.py | ikcalB/linuxcnc-mirror,ikcalB/linuxcnc-mirror,ikcalB/linuxcnc-mirror,ikcalB/linuxcnc-mirror,ikcalB/linuxcnc-mirror,ikcalB/linuxcnc-mirror,ikcalB/linuxcnc-mirror | lib/python/multifilebuilder_gtk3.py | lib/python/multifilebuilder_gtk3.py | # -*- python -*-
# Copyright (C) 2014 Jeff Epler <jepler@unpythonic.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from gi.repository import Gtk
__all__ = ['MultiFileBuilder']
class MultiFileBuilder:
def __init__(self):
self.builders = []
self.domain = None
def set_translation_domain(self, domain):
self.domain = domain
def connect_signals(self, obj):
for b in self.builders: b.connect_signals(obj)
def add_from_file(self, fn):
builder = Gtk.Builder()
if self.domain is not None: builder.set_translation_domain(self.domain)
self.builders.append(builder)
builder.add_from_file(fn)
def add_from_string(self, strg):
builder = Gtk.Builder()
if self.domain is not None: builder.set_translation_domain(self.domain)
self.builders.append(builder)
builder.add_from_string(strg)
def get_object(self, obj):
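# search every loaded builder file; an object ID must be unique across all of them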
objects = [builder.get_object(obj) for builder in self.builders]
objects = [o for o in objects if o]
if not objects: return None
if len(objects) > 1: raise ValueError, """Use of object with duplicate ID -> '%s'"""% obj
return objects[0]
| lgpl-2.1 | Python |
|
216c7c21adcbec601ebcc624eac4b5087422e5d2 | add test for sugarsource | sncosmo/sncosmo,sncosmo/sncosmo,sncosmo/sncosmo | sncosmo/tests/test_sugarsource.py | sncosmo/tests/test_sugarsource.py | # Licensed under a 3-clause BSD style license - see LICENSES
"""Tests for SUGARSource (and wrapped in Model)"""
import os
import numpy as np
import pytest
from numpy.testing import assert_allclose
import sncosmo
def sugar_model(Xgr=0, q1=0, q2=0, q3=0, A=0,
phase=np.linspace(-5, 30, 10),
wave=np.linspace(4000, 8000, 10)):
"""
Give a spectral time series of SUGAR model
for a given set of parameters.
"""
source = sncosmo.get_source('sugar')
mag_sugar = source._model['M0'](phase, wave)
keys = ['ALPHA1', 'ALPHA2', 'ALPHA3', 'CCM']
parameters = [q1, q2, q3, A]
for i in range(4):
comp = source._model[keys[i]](phase, wave) * parameters[i]
mag_sugar += comp
# Mag AB used in the training of SUGAR.
mag_sugar += 48.59
wave_factor = (wave ** 2 / 299792458. * 1.e-10)
return (Xgr * 10. ** (-0.4 * mag_sugar) / wave_factor)
@pytest.mark.might_download
def test_sugarsource():
"""Test timeseries output from SUGARSource vs pregenerated timeseries
from the original files."""
source = sncosmo.get_source("sugar")
model = sncosmo.Model(source)
q1 = [-1, 0, 1, 2]
q2 = [1, 0, -1, -2]
q3 = [-1, 1, 0, -2]
A = [-0.1, 0, 0.2, 0.5]
Xgr = [10**(-0.4 * 34), 10**(-0.4 * 33),
10**(-0.4 * 38), 10**(-0.4 * 42)]
time = np.linspace(-5, 30, 10)
wave = np.linspace(4000, 8000, 10)
for i in range(len(q1)):
fluxref = sugar_model(Xgr=Xgr[i],
q1=q1[i],
q2=q2[i],
q3=q3[i],
A=A[i],
phase=time,
wave=wave)
model.set(z=0, t0=0, Xgr=Xgr[i],
q1=q1[i], q2=q2[i],
q3=q3[i], A=A[i])
flux = model.flux(time, wave)
assert_allclose(flux, fluxref, rtol=1e-13)
| bsd-3-clause | Python |
|
da85d929118a9ac51a112a405818838e476a2f80 | Add blank test for updating later.. | flyte/pi-mqtt-gpio | tests/test_pi_mqtt_gpio.py | tests/test_pi_mqtt_gpio.py | def test_noop():
pass
| mit | Python |
|
06455d743590e47bfe5c9e1a6ff745622abe9cb5 | add tests for polymorphism | fishtown-analytics/hologram | tests/test_polymorphism.py | tests/test_polymorphism.py | import pytest
from dataclasses import dataclass
from hologram import JsonSchemaMixin, ValidationError
from hologram.helpers import StrEnum, StrLiteral
from typing import Union
class Bar(StrEnum):
x = "x"
y = "y"
@dataclass
class BarX(JsonSchemaMixin):
bar: StrLiteral(Bar.x)
@dataclass
class BarY(JsonSchemaMixin):
bar: StrLiteral(Bar.y)
@dataclass
class Foo(JsonSchemaMixin):
foo: Union[BarX, BarY]
def test_symmetry():
def assert_symmetry(value):
assert Foo.from_dict(value).to_dict() == value
assert_symmetry({"foo": {"bar": "x"}})
assert_symmetry({"foo": {"bar": "y"}})
def test_subclasses():
foo_x = Foo.from_dict({"foo": {"bar": "x"}})
assert isinstance(foo_x.foo, BarX)
foo_y = Foo.from_dict({"foo": {"bar": "y"}})
assert isinstance(foo_y.foo, BarY)
| mit | Python |
|
64fac50f77c492edf20b0e4161b9da988831f2ed | change author can also be null | c3nav/c3nav,c3nav/c3nav,c3nav/c3nav,c3nav/c3nav | src/c3nav/editor/migrations/0002_auto_20170612_1615.py | src/c3nav/editor/migrations/0002_auto_20170612_1615.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-12 16:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('editor', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='change',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='changes', to=settings.AUTH_USER_MODEL, verbose_name='Author'),
),
]
| apache-2.0 | Python |
|
a42a6a54f732ca7eba700b867a3025739ad6a271 | Move main code to function because of pylint warning 'Invalid constant name' | vazhnov/list_all_users_in_group | list_all_users_in_group.py | list_all_users_in_group.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import grp
import pwd
import inspect
import argparse
def list_all_users_in_group(groupname):
"""Get list of all users of group.
Get sorted list of all users of group GROUP,
including users with main group GROUP.
Origin in https://github.com/vazhnov/list_all_users_in_group
"""
try:
group = grp.getgrnam(groupname)
# On error "KeyError: 'getgrnam(): name not found: GROUP'"
except KeyError:
return None
group_all_users_set = set(group.gr_mem)
for user in pwd.getpwall():
if user.pw_gid == group.gr_gid:
group_all_users_set.add(user.pw_name)
return sorted(group_all_users_set)
def main():
parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--delimiter', default='\n', help='Use DELIMITER instead of newline for users delimiter')
parser.add_argument('groupname', help='Group name')
args = parser.parse_args()
result = list_all_users_in_group(args.groupname)
if result:
print (args.delimiter.join(result))
if __name__ == "__main__":
main()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import grp
import pwd
import inspect
import argparse
def list_all_users_in_group(groupname):
"""Get list of all users of group.
Get sorted list of all users of group GROUP,
including users with main group GROUP.
Origin in https://github.com/vazhnov/list_all_users_in_group
"""
try:
group = grp.getgrnam(groupname)
# On error "KeyError: 'getgrnam(): name not found: GROUP'"
except KeyError:
return None
group_all_users_set = set(group.gr_mem)
for user in pwd.getpwall():
if user.pw_gid == group.gr_gid:
group_all_users_set.add(user.pw_name)
return sorted(group_all_users_set)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--delimiter', default='\n', help='Use DELIMITER instead of newline for users delimiter')
parser.add_argument('groupname', help='Group name')
args = parser.parse_args()
result = list_all_users_in_group(args.groupname)
if result:
print (args.delimiter.join(result))
| cc0-1.0 | Python |
76aed66701ac9f267ef08bde0f0b55e2ad905e68 | Add micro bench arith-modulo | wdv4758h/ZipPy,wdv4758h/ZipPy,wdv4758h/ZipPy,wdv4758h/ZipPy,wdv4758h/ZipPy,wdv4758h/ZipPy,wdv4758h/ZipPy,wdv4758h/ZipPy | graal/edu.uci.python.benchmark/src/micro/arith-modulo.py | graal/edu.uci.python.benchmark/src/micro/arith-modulo.py | # zwei 10/09/13
# arithmetic ops (partially extracted from spectralnorm)
import time
def docompute(num):
for i in range(num):
sum = 0
j = 0
# if i == 0:
# i += 1
while j < i:
if i % 3 == 0:
temp = 1
else:
temp = i % 3
j += temp
sum = sum + j
return sum
def measure(num):
print("Start timing...")
start = time.time()
for run in range(num):
sum = docompute(5000) #5000
print("sum", sum)
duration = "%.3f\n" % (time.time() - start)
print("arith-modulo: " + duration)
for run in range(50):
docompute(1000) #1000
measure(50) | bsd-3-clause | Python |
|
996a4dd0223d8f327dbe822f9f6e430465c6c70f | add django settings for djsupervisor | mvillis/measure-mate,mvillis/measure-mate,rloomans/measure-mate,rloomans/measure-mate,rloomans/measure-mate,rloomans/measure-mate,mvillis/measure-mate,mvillis/measure-mate | measure_mate/settings/supervisord.py | measure_mate/settings/supervisord.py | from measure_mate.settings.base import *
INSTALLED_APPS += (
'djsupervisor',
)
| mit | Python |
|
d6acd4324f5fe5e57750a335b35cd42edd8544b5 | Solve the puzzle. | machinelearningdeveloper/aoc_2016 | 01/solve.py | 01/solve.py | """Report the Manhattan distance between a starting point and an ending point,
given a set of directions to follow to move between the two points."""
from distance import get_distance
from directions import load_directions, follow_directions
def main():
directions = load_directions('directions.txt')
starting_point = (0, 0)
starting_orientation = 'N'
ending_point, _ = follow_directions(starting_point, starting_orientation, *directions)
print(get_distance(starting_point, ending_point))
if __name__ == '__main__':
main()
| mit | Python |
|
8eff29ba9777cd977f04a2c0b68d598ad63c8f47 | Create 02.py | ezralalonde/cloaked-octo-sansa | 02/ex/02.py | 02/ex/02.py | # Define a procedure, stamps, which takes as its input a positive integer in
# pence and returns the number of 5p, 2p and 1p stamps (p is pence) required
# to make up that value. The answer should use as many 5p stamps as possible,
# then 2 pence stamps and finally 1p stamps.
def stamps(nn):
# Your code here
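    # Greedy: integer division gives the 5p count; the remainder is split
    # into 2p and then 1p stamps (`/` here is Python 2 integer division).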
return (nn / 5, (nn % 5) / 2, (nn % 5) % 2)
print stamps(8)
#>>> (1, 1, 1) # one 5p stamp, one 2p stamp and one 1p stamp
print stamps(5)
#>>> (1, 0, 0) # one 5p stamp, no 2p stamps and no 1p stamps
print stamps(29)
#>>> (5, 2, 0) # five 5p stamps, two 2p stamps and no 1p stamps
print stamps(0)
#>>> (0, 0, 0) # no 5p stamps, no 2p stamps and no 1p stamps
| bsd-2-clause | Python |
|
406fcf5297458f5469364faf8180683b89fd527c | Add wmi sampler tests (#5859) | DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core | datadog_checks_base/tests/test_wmisampler.py | datadog_checks_base/tests/test_wmisampler.py | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from tests.utils import requires_windows
try:
from datadog_checks.base.checks.win.wmi import WMISampler
except ImportError:
pass
@requires_windows
@pytest.mark.unit
def test_format_filter_value():
filters = [{'a': 'b'}, {'c': 'd'}]
sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)
formatted_filters = sampler.formatted_filters
assert formatted_filters == " WHERE ( c = 'd' ) OR ( a = 'b' )"
@requires_windows
@pytest.mark.unit
def test_format_filter_list():
filters = [{'a': ['>', 1, 'i_get_ignored']}]
sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)
formatted_filters = sampler.formatted_filters
assert formatted_filters == " WHERE ( a > '1' )"
@requires_windows
@pytest.mark.unit
def test_format_filter_like():
filters = [{'a': '%foo'}]
sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)
formatted_filters = sampler.formatted_filters
assert formatted_filters == " WHERE ( a LIKE '%foo' )"
@requires_windows
@pytest.mark.unit
def test_format_filter_list_expected():
filters = [{'a': ['<', 3]}]
sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)
formatted_filters = sampler.formatted_filters
assert formatted_filters == " WHERE ( a < '3' )"
@requires_windows
@pytest.mark.unit
def test_format_filter_tuple():
# needed for backwards compatibility and hardcoded filters
filters = [{'a': ('<', 3)}]
sampler = WMISampler(logger=None, class_name='MyClass', property_names='my.prop', filters=filters)
formatted_filters = sampler.formatted_filters
assert formatted_filters == " WHERE ( a < '3' )"
| bsd-3-clause | Python |
|
d067d9937ff34787e6f632d86075af29c27d98f8 | Add py solution for 714. Best Time to Buy and Sell Stock with Transaction Fee | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/best-time-to-buy-and-sell-stock-with-transaction-fee.py | py/best-time-to-buy-and-sell-stock-with-transaction-fee.py | class Solution(object):
def maxProfit(self, prices, fee):
"""
:type prices: List[int]
:type fee: int
:rtype: int
"""
hold, not_hold = None, 0
for p in prices:
hold, not_hold = max(hold, not_hold - p - fee), max(not_hold, None if hold is None else hold + p)
return max(hold, not_hold)
| apache-2.0 | Python |
|
4c2c80e0004a758787beb555fbbe789cce5e82fc | Fix variable referenced before assginment in vmwareapi code. | n0ano/gantt,n0ano/gantt | nova/tests/test_vmwareapi_vm_util.py | nova/tests/test_vmwareapi_vm_util.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
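    # Minimal stand-in for a VMware API session: _call_method simply
    # returns the canned value supplied at construction time.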
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
| apache-2.0 | Python |
|
db8521aefa5f751bd3309d4bedd0a3ed4ec02056 | Create Sparsennetdemo.py (#453) | probml/pyprobml,probml/pyprobml,probml/pyprobml,probml/pyprobml | scripts/sparsennetdemo.py | scripts/sparsennetdemo.py | import itertools
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as npr
import jax.numpy as jnp
from jax import jit, grad, random
from jax.experimental import stax
from jax.experimental.stax import Dense, Softplus
from jax.tree_util import (tree_flatten, tree_unflatten)
from jax.flatten_util import ravel_pytree
from graphviz import Digraph
def generate_data(num_instances, num_vars, key):
subkeys = random.split(key, 4)
X = 10 * random.uniform(subkeys[0], shape=(num_instances, num_vars)) - 5
var = 0.1
n_points = 20
example_points = 10 * random.uniform(subkeys[1], shape=(n_points, num_vars))-5
targets = 10* random.uniform(subkeys[2], shape=(n_points, 1)) -5
y = np.zeros((num_instances,1))
for i in range(num_instances):
dists = np.sum(np.abs(np.tile(X[i,:],(n_points,1)) - example_points), axis=1)
lik = (1/np.sqrt(2* np.pi)) * np.exp(-dists/(2*var))
lik = lik / np.sum(lik)
y[i,0] = lik.T @ targets + random.normal(subkeys[3])/15
return X, y
@jit
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return jnp.sum((preds - targets)**2) / 2.0
def data_stream(num_instances, batch_size):
rng = npr.RandomState(0)
num_batches = num_instances // batch_size
while True:
perm = rng.permutation(num_instances)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield X[batch_idx], y[batch_idx]
def pgd(alpha, lambd):
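    # Proximal gradient descent for the L1 penalty: a plain gradient step
    # followed by soft-thresholding, which drives small weights exactly to
    # zero and sparsifies the network.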
step_size = alpha
def init(w0):
return w0
def soft_thresholding(z, threshold):
return jnp.sign(z) * jnp.maximum(jnp.absolute(z) - threshold , 0.0)
def update(i, g, w):
g_flat, unflatten = ravel_pytree(g)
w_flat = ravel_pytree_jit(w)
updated_params = soft_thresholding(w_flat - step_size * g_flat, step_size * lambd)
return unflatten(updated_params)
def get_params(w):
return w
def set_step_size(lr):
step_size = lr
return init, update, get_params, soft_thresholding, set_step_size
ravel_pytree_jit = jit(lambda tree: ravel_pytree(tree)[0])
@jit
def line_search(w, g, batch, beta):
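    # Backtracking line search: shrink the step size by `beta` until the
    # proximal step satisfies the quadratic upper-bound condition.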
lr_i = 1
g_flat, unflatten_g = ravel_pytree(g)
w_flat = ravel_pytree_jit(w)
z_flat = soft_thresholding(w_flat - lr_i*g_flat, lr_i* lambd)
z = unflatten_g(z_flat)
for i in range(20):
is_converged = loss(z, batch) > loss(w, batch) + g_flat@(z_flat - w_flat) + np.sum((z_flat - w_flat)**2)/(2*lr_i)
lr_i = jnp.where(is_converged,lr_i, beta*lr_i)
return lr_i
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
g = grad(loss)(params, batch)
lr_i = line_search(params, g, batch, 0.5)
set_step_size(lr_i)
return opt_update(i,g, opt_state)
key = random.PRNGKey(3)
num_epochs = 60000
num_instances, num_vars = 200, 2
batch_size = num_instances
minim, maxim = -5, 5
x, y = generate_data(num_instances, 1, key)
X = np.c_[np.ones_like(x), x]
batches = data_stream(num_instances, batch_size)
init_random_params, predict = stax.serial(
Dense(5), Softplus,
Dense(5), Softplus,
Dense(5), Softplus,
Dense(5), Softplus,
Dense(1))
lambd, step_size = 0.6, 1e-4
opt_init, opt_update, get_params, soft_thresholding, set_step_size = pgd(step_size,lambd)
_, init_params = init_random_params(key, (-1, num_vars))
opt_state = opt_init(init_params)
itercount = itertools.count()
for epoch in range(num_epochs):
opt_state = update(next(itercount), opt_state, next(batches))
labels = {"training" : "Data", "test" : "Deep Neural Net" }
x_test = np.arange(minim, maxim, 1e-5)
x_test = np.c_[np.ones((x_test.shape[0],1)), x_test]
params = get_params(opt_state)
plt.scatter(X[:,1], y, c='k', s=13, label=labels["training"])
plt.plot(x_test[:,1], predict(params, x_test), 'g-',linewidth=3, label=labels["test"])
plt.gca().legend(loc="upper right")
plt.show()
weights, _ = tree_flatten(params)
w = weights[::2]
dot = Digraph(name='Neural Network',format='png',graph_attr={'ordering':'out'}, node_attr={'shape': 'circle', 'color': 'black', 'fillcolor':'#FFFFE0', 'style': 'filled'})
dot.node('00','<x<SUB>0</SUB>>')
dot.node('01','<x<SUB>1</SUB>>')
for i in range(len(w)):
for j in range(w[i].shape[1]):
subscript = '{}{}'.format(i+1, j)
dot.node(subscript, '<h<SUB>{}</SUB>>'.format(subscript))
for k in range(w[i].shape[0]):
origin = '{}{}'.format(i,k)
if np.abs(w[i][k,j])>1e-4:
dot.edge(origin, subscript)
else:
dot.edge(origin, subscript,style='invis')
dot.edge('42','50', style='invis')
dot.view()
| mit | Python |
|
c687ab125af67d769afc781731b1a2b663a5bb2c | Use SystemRandom to generate unpredictable random slugs. Fixed duplicate characters in the choice string, removed iI from it to prevent confusion. Fixes issue #40. | rbarrois/xelpaste,SanketDG/dpaste,SanketDG/dpaste,rbarrois/xelpaste,bartTC/dpaste,rbarrois/xelpaste,bartTC/dpaste,SanketDG/dpaste,bartTC/dpaste | dpaste/models.py | dpaste/models.py | from datetime import datetime
from os import urandom
from random import SystemRandom
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import mptt
from dpaste.highlight import LEXER_DEFAULT
R = SystemRandom()
L = getattr(settings, 'DPASTE_SLUG_LENGTH', 4)
T = getattr(settings, 'DPASTE_SLUG_CHOICES',
'abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNOPQRSTUVWXYZ1234567890')
def generate_secret_id(length=L):
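    # SystemRandom draws from os.urandom(), so slugs cannot be predicted
    # from earlier outputs the way Mersenne Twister output can.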
    return ''.join([R.choice(T) for i in range(length)])
class Snippet(models.Model):
secret_id = models.CharField(_(u'Secret ID'), max_length=255, blank=True)
content = models.TextField(_(u'Content'), )
lexer = models.CharField(_(u'Lexer'), max_length=30, default=LEXER_DEFAULT)
published = models.DateTimeField(_(u'Published'), blank=True)
expires = models.DateTimeField(_(u'Expires'), blank=True)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class Meta:
ordering = ('-published',)
db_table = 'dpaste_snippet'
def get_linecount(self):
return len(self.content.splitlines())
@property
def is_single(self):
return self.is_root_node() and not self.get_children()
def save(self, *args, **kwargs):
if not self.pk:
self.published = datetime.now()
self.secret_id = generate_secret_id()
super(Snippet, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('snippet_details', kwargs={'snippet_id': self.secret_id})
def __unicode__(self):
return self.secret_id
mptt.register(Snippet, order_insertion_by=['content'])
| import datetime
import random
import mptt
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from dpaste.highlight import LEXER_DEFAULT
L = getattr(settings, 'DPASTE_SLUG_LENGTH', 4)
T = getattr(settings, 'DPASTE_SLUG_CHOICES',
'abcdefghijkmnopqrstuvwwxyzABCDEFGHIJKLOMNOPQRSTUVWXYZ1234567890')
def generate_secret_id(length=L):
return ''.join([random.choice(T) for i in range(length)])
class Snippet(models.Model):
secret_id = models.CharField(_(u'Secret ID'), max_length=255, blank=True)
content = models.TextField(_(u'Content'), )
lexer = models.CharField(_(u'Lexer'), max_length=30, default=LEXER_DEFAULT)
published = models.DateTimeField(_(u'Published'), blank=True)
expires = models.DateTimeField(_(u'Expires'), blank=True)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class Meta:
ordering = ('-published',)
db_table = 'dpaste_snippet'
def get_linecount(self):
return len(self.content.splitlines())
@property
def is_single(self):
return self.is_root_node() and not self.get_children()
def save(self, *args, **kwargs):
if not self.pk:
self.published = datetime.datetime.now()
self.secret_id = generate_secret_id()
super(Snippet, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('snippet_details', kwargs={'snippet_id': self.secret_id})
def __unicode__(self):
return self.secret_id
mptt.register(Snippet, order_insertion_by=['content'])
| mit | Python |
1dc439fcf7a823270156708208339a8bf420703c | Create Generic Sitemap abstract django | opps/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,YACOWS/opps,opps/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps | opps/sitemaps/sitemaps.py | opps/sitemaps/sitemaps.py | # -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap as DjangoGenericSitemap
from django.contrib.sitemaps import Sitemap as DjangoSitemap
from django.utils import timezone
from opps.articles.models import Article
def InfoDisct(googlenews=False):
article = Article.objects.filter(date_available__lte=timezone.now(),
published=True)
if googlenews:
article = article[:1000]
return {
'queryset': article,
'date_field': 'date_available',
}
class BaseSitemap(DjangoSitemap):
priority = 0.6
def items(self):
return Article.objects.filter(date_available__lte=timezone.now(),
published=True)
def lastmod(self, obj):
return obj.date_available
class GenericSitemap(DjangoGenericSitemap):
limit = 1000
priority = 0.6
| mit | Python |
|
7bab9610bf9278b8dedb55a513f22130e2f629ed | Add PP example | EtienneCmb/tensorpac,EtienneCmb/tensorpac | examples/13_PreferedPhase.py | examples/13_PreferedPhase.py | """This example illustrates how to find the preferred phase (PP).
First, the amplitude is binned according to phase slices (360°/nbins). Then,
the PP is defined as the phase where the amplitude is maximum.
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorpac import PacSignals, Pac
plt.style.use('seaborn-poster')
# Generate 100 datasets with a [5, 7]<->[95, 105]hz coupling :
sf = 1024.
ndatasets = 100
data, time = PacSignals(fpha=[5, 7], famp=[95, 105], ndatasets=ndatasets,
sf=sf, noise=3, chi=.7, npts=2000)
# Define a Pac object. Here, we are not going to use the idpac variable :
p = Pac(fpha=[5, 7], famp=(60, 200, 10, 1))
# Extract the phase and the amplitude :
pha = p.filter(sf, data, axis=1, ftype='phase')
amp = p.filter(sf, data, axis=1, ftype='amplitude')
# Introduce a pi/2 phase shift (equivalent to adding a 90° shift) :
pha += np.pi/2
# Now, compute the PP :
ambin, pp, vecbin = p.pp(pha, amp, axis=2, nbins=72)
# Reshape the PP to be (ndatasets, namp) :
pp = np.squeeze(pp).T
# Reshape the amplitude to be (nbins, namp, ndatasets) and take the mean across
# datasets :
ambin = np.squeeze(ambin).mean(-1)
# plt.figure(figsize=(20, 35))
# Plot the prefered phase :
plt.subplot(221)
plt.pcolormesh(p.yvec, np.arange(100), np.rad2deg(pp), cmap='Spectral_r')
cb = plt.colorbar()
plt.clim(vmin=-180., vmax=180.)
plt.axis('tight')
plt.xlabel('Amplitude center frequency (Hz)')
plt.ylabel('Ndatasets')
plt.title("PP for each dataset and for several amplitudes.\n100hz amplitudes"
" are phase locked to 90° (<=> pi/2)")
cb.set_label('PP (in degrees)')
# Then, we show the histogram corresponding to an 100he amplitude :
idx100 = np.abs(p.yvec - 100.).argmin()
plt.subplot(222)
h = plt.hist(pp[:, idx100], color='#ab4642')
plt.xlim((-np.pi, np.pi))
plt.xlabel('PP')
plt.title('PP across trials for the 100hz amplitude')
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.gca().set_xticklabels([r"$-\pi$", r"$-\frac{\pi}{2}$", "$0$",
r"$\frac{\pi}{2}$", r"$\pi$"])
p.polar(ambin.T, vecbin, p.yvec, cmap='Spectral_r', interp=.1, subplot=212,
cblabel='Amplitude bins')
# plt.savefig('pp.png', dpi=300, bbox_inches='tight')
p.show()
| bsd-3-clause | Python |
|
25b7f06a0f185a7e83aab38888e32b41c2c31853 | Create 02.py | ezralalonde/cloaked-octo-sansa | 03/hw/02.py | 03/hw/02.py | # Define a procedure, greatest,
# that takes as input a list
# of positive numbers, and
# returns the greatest number
# in that list. If the input
# list is empty, the output
# should be 0.
def greatest(xs):
greatest = 0
for x in xs:
if x > greatest:
greatest = x
return greatest
#print greatest([4,23,1])
#>>> 23
#print greatest([])
#>>> 0
| bsd-2-clause | Python |
|
321c857d4cc2bacdeaa398d3b4b1fd7769f33718 | Add py-soupsieve package (#12827) | LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-soupsieve/package.py | var/spack/repos/builtin/packages/py-soupsieve/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySoupsieve(PythonPackage):
"""A modern CSS selector implementation for Beautiful Soup."""
homepage = "https://github.com/facelessuser/soupsieve"
url = "https://pypi.io/packages/source/s/soupsieve/soupsieve-1.9.3.tar.gz"
version('1.9.3', sha256='8662843366b8d8779dec4e2f921bebec9afd856a5ff2e82cd419acc5054a1a92')
depends_on('py-setuptools', type='build')
depends_on('py-backports-functools-lru-cache', when='^python@:2', type=('build', 'run'))
| lgpl-2.1 | Python |
|
6ca8bb70e8e9c6d40418e836d222648478eb8f31 | Split Questions into students and institutions | klpdotorg/tada,klpdotorg/tada | tada/schools/migrations/0005_auto_20150427_1938.py | tada/schools/migrations/0005_auto_20150427_1938.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0004_auto_20150427_1912'),
]
operations = [
migrations.CreateModel(
name='QuestionInstitution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('question_type', models.IntegerField(default=1, choices=[(1, b'Marks'), (2, b'Grade')])),
('score_min', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('score_max', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('grade', models.CharField(max_length=100, null=True, blank=True)),
('order', models.IntegerField()),
('double_entry', models.BooleanField(default=True)),
('active', models.IntegerField(default=2, null=True, blank=True)),
('assessment', models.ForeignKey(to='schools.AssessmentInstitution')),
],
options={
'ordering': ['order'],
},
),
migrations.CreateModel(
name='QuestionStudent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('question_type', models.IntegerField(default=1, choices=[(1, b'Marks'), (2, b'Grade')])),
('score_min', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('score_max', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('grade', models.CharField(max_length=100, null=True, blank=True)),
('order', models.IntegerField()),
('double_entry', models.BooleanField(default=True)),
('active', models.IntegerField(default=2, null=True, blank=True)),
('assessment', models.ForeignKey(to='schools.AssessmentStudent')),
],
options={
'ordering': ['order'],
},
),
migrations.AlterUniqueTogether(
name='question',
unique_together=set([]),
),
migrations.RemoveField(
model_name='question',
name='assessment',
),
migrations.AlterField(
model_name='answerinstitution',
name='question',
field=models.ForeignKey(to='schools.QuestionInstitution'),
),
migrations.AlterField(
model_name='answerstudent',
name='question',
field=models.ForeignKey(to='schools.QuestionStudent'),
),
migrations.DeleteModel(
name='Question',
),
migrations.AlterUniqueTogether(
name='questionstudent',
unique_together=set([('assessment', 'name')]),
),
migrations.AlterUniqueTogether(
name='questioninstitution',
unique_together=set([('assessment', 'name')]),
),
]
| mit | Python |
|
687661d05179d7a36629a6bd036cdb8dc6a3c637 | Create BasePage.py | zac11/POMSelenium | BasePage.py | BasePage.py | from selenium import webdriver
from selenium.webdriver.common.by import By
# Base class that defines the attributes and methods common to all page classes
class BasePage(object):
def __init__(self, driver):
self.driver = driver
self.driver.implicitly_wait(30)
self.driver.timeout=30
# Page object representing the login page
class LoginPage(BasePage):
email_id=(By.NAME,'email')
pass_word=(By.NAME,'pass')
submit_btn=(By.ID,'u_0_l')
def set_email(self,email_id):
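        # *LoginPage.email_id unpacks the (By, value) locator tuple into
        # find_element(by, value)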
email_element=self.driver.find_element(*LoginPage.email_id)
email_element.send_keys(email_id)
def set_password(self,password):
password_element=self.driver.find_element(*LoginPage.pass_word)
password_element.send_keys(password)
def click_submit_btn(self):
submit_button=self.driver.find_element(*LoginPage.submit_btn)
submit_button.click()
def login(self, email,password):
self.set_email(email)
self.set_password(password)
self.click_submit_btn()
| apache-2.0 | Python |
|
5d6777cc386f6fbd982b5021a55b9a8a0510ef1a | Convert month | sitdh/com-prog | ch06_07_p.py | ch06_07_p.py | months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
month, day, year = input().strip().split("/")
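# months is 0-indexed, so the 1-12 month number needs the -1 offset below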
print("%s %s %s" % (day, months[int(month) - 1], year))
| mit | Python |
|
3dc50a3f5dbb674a8b7e5383768bc5ebe72ea077 | add AUC example | wepe/tgboost,wepe/tgboost | examples/classification_auc.py | examples/classification_auc.py | from tgboost import TGBoost
import pandas as pd
train = pd.read_csv('train.csv')
train = train.sample(frac=1.0, axis=0) # shuffle the data
val = train.iloc[0:5000]
train = train.iloc[5000:]
train_y = train.label
train_X = train.drop('label', axis=1)
val_y = val.label
val_X = val.drop('label', axis=1)
params = {'loss': "logisticloss",
'eta': 0.3,
'max_depth': 6,
'num_boost_round': 500,
'scale_pos_weight': 1.0,
'subsample': 0.7,
'colsample_bytree': 0.7,
'colsample_bylevel': 1.0,
'min_sample_split': 10,
'min_child_weight': 2,
'reg_lambda': 10,
'gamma': 0,
'eval_metric': "auc",
'early_stopping_rounds': 20,
'maximize': False,
'num_thread': 16}
tgb = TGBoost()
tgb.fit(train_X, train_y, validation_data=(val_X, val_y), **params)
| mit | Python |
|
301f8cf79d1793826a9b73fca6406c005a1c1638 | Create search.py (#4900) | mitmproxy/mitmproxy,mitmproxy/mitmproxy,mhils/mitmproxy,Kriechi/mitmproxy,mhils/mitmproxy,Kriechi/mitmproxy,mitmproxy/mitmproxy,mhils/mitmproxy,mitmproxy/mitmproxy,mitmproxy/mitmproxy,Kriechi/mitmproxy,mhils/mitmproxy,Kriechi/mitmproxy,mhils/mitmproxy | examples/contrib/search.py | examples/contrib/search.py | import re
import typing
from json import dumps
from mitmproxy import command, ctx, flow
MARKER = ':mag:'
RESULTS_STR = 'Search Results: '
class Search:
def __init__(self):
self.exp = None
@command.command('search')
def _search(self,
flows: typing.Sequence[flow.Flow],
regex: str) -> None:
"""
Defines a command named "search" that matches
the given regular expression against most parts
of each request/response included in the selected flows.
Usage: from the flow list view, type ":search" followed by
a space, then a flow selection expression; e.g., "@shown",
then the desired regular expression to perform the search.
Alternatively, define a custom shortcut in keys.yaml; e.g.:
-
key: "/"
ctx: ["flowlist"]
cmd: "console.command search @shown "
Flows containing matches to the expression will be marked
with the magnifying glass emoji, and their comments will
contain JSON-formatted search results.
To view flow comments, enter the flow view
and navigate to the detail tab.
"""
try:
self.exp = re.compile(regex)
except re.error as e:
ctx.log.error(e)
return
for _flow in flows:
# Erase previous results while preserving other comments:
comments = list()
for c in _flow.comment.split('\n'):
if c.startswith(RESULTS_STR):
break
comments.append(c)
_flow.comment = '\n'.join(comments)
if _flow.marked == MARKER:
_flow.marked = False
results = {k: v for k, v in self.flow_results(_flow).items() if v}
if results:
comments.append(RESULTS_STR)
comments.append(dumps(results, indent=2))
_flow.comment = '\n'.join(comments)
_flow.marked = MARKER
def header_results(self, message):
results = {k: self.exp.findall(v) for k, v in message.headers.items()}
return {k: v for k, v in results.items() if v}
def flow_results(self, _flow):
results = dict()
results.update(
{'flow_comment': self.exp.findall(_flow.comment)})
if _flow.request is not None:
results.update(
{'request_path': self.exp.findall(_flow.request.path)})
results.update(
{'request_headers': self.header_results(_flow.request)})
if _flow.request.text:
results.update(
{'request_body': self.exp.findall(_flow.request.text)})
if _flow.response is not None:
results.update(
{'response_headers': self.header_results(_flow.response)})
if _flow.response.text:
results.update(
{'response_body': self.exp.findall(_flow.response.text)})
return results
addons = [Search()]
| mit | Python |
|
e9c23f9bc3f40b0f99bc7ff045abd7f073a0b568 | change of json structure for lang status | cainesap/mapMakeR,cainesap/mapMakeR,cainesap/mapMakeR | languagesOfTheWorld/fetchGlottologData.py | languagesOfTheWorld/fetchGlottologData.py | ## parse Glottolog JSON data
## PRELIMS
# libs
import json, urllib2
import pandas as pd
# vars
withgeoCount = 0 # count languoids with lat/long coordinates
nongeoCount = 0 # count languoids without lat/long coordinates
maxclass = 0 # what's the longest classification?
NONGEOs = []; IDs = []; NAMEs = []; TYPEs = []; CLASS1s = []; CLASS2s = []; CLASS3s = []; LONGs = []; LATs = []; STATUSes = []; TTIPs = []; ISOs = [] # empty lists for data
## procedure for those languoids with geo-coordinates
def withlatlong(langID, lon, lat, name, langdata, langtype, withgeoCount, maxclass):
withgeoCount += 1
langstatus = langdata['status']
# langstatus = 'Unknown' # default endangerment level: find out if info exists
# if 'endangerment' in langdata['jsondata']:
# langstatus = 'Living' if langdata['jsondata']['endangerment'] is None else langdata['jsondata']['endangerment']
class1 = 'Unknown' # take the 1st language family classification, if any found
if len(langdata['classification']) > 0:
class1 = langdata['classification'][0]['name']
classlist = 'family:' + class1 # start class list for tooltip
class2 = '-' # and the 2nd, if found
if len(langdata['classification']) > 1:
class2 = langdata['classification'][1]['name']
classlist += ',' + class2
class3 = '-' # and the 3rd, if found
if len(langdata['classification']) > 2:
class3 = langdata['classification'][2]['name']
classlist += ',' + class3
if len(langdata['classification']) > maxclass:
maxclass = len(langdata['classification'])
if 'iso639-3' in langdata:
iso = langdata['iso639-3']
else:
iso = 'none'
tooltip = "<strong><a href=\"http://glottolog.org/resource/languoid/id/" + langID + "\" target=\"_blank\">" + name + "</a></strong><br />" + classlist + "<br />type:<em>" + langtype + "</em><br />status:<em>" + langstatus + "</em>"
print "withgeo: ", withgeoCount, name, langtype, iso
print "family: ", class1, class2, class3
print "lat/long: ", lon, lat; print "status: ", langstatus
IDs.append(langID); NAMEs.append(name); TYPEs.append(langtype); CLASS1s.append(class1); CLASS2s.append(class2); CLASS3s.append(class3); LONGs.append(lon); LATs.append(lat); STATUSes.append(langstatus); TTIPs.append(tooltip); ISOs.append(iso) # build lists
return withgeoCount, maxclass
## and for those without geo-coordinates
def nonlatlong(nongeoCount, langID, name, langtype):
nongeoCount += 1
NONGEOs.append(langtype)
print "nongeo: ", nongeoCount, name, langtype
return nongeoCount
## closing print actions
def finishUp():
print "\n-----\n-----\nLanguoids with geo-coords: ", withgeoCount # how many languoids with lat/long coordinates?
print "\n-----\n-----\nLanguoids w/o geo-coords: ", nongeoCount # how many languoids without lat/long coordinates?
from collections import Counter
print(Counter(NONGEOs))
print "\n-----\n-----\nMax classification: ", maxclass # languoid with longest classification?
## merge vectors and convert to dataframe
## Pandas commands from: http://nbviewer.ipython.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/01%20-%20Lesson.ipynb
glottoData = zip(IDs, ISOs, NAMEs, TYPEs, CLASS1s, CLASS2s, CLASS3s, LONGs, LATs, STATUSes, TTIPs)
glottoDF = pd.DataFrame(data = glottoData, columns = ['id', 'iso639-3', 'name', 'type', 'family1', 'family2', 'family3', 'long', 'lat', 'status', 'tooltip'])
## export to csv
glottoDF.to_csv('shinyApp/data/glottologLanguoids.csv', encoding='utf-8')
## MAIN
## [1] list of Glottolog language resources (save to file)
response = urllib2.urlopen('http://glottolog.org/resourcemap.json?rsc=language')
data = json.load(response)
with open('glottologResourceMap.json', 'w') as outfile:
json.dump(data, outfile)
outfile.close()
## [2] loop over all languoids, fetch jsondata, make list of dictionaries
nres = len(data['resources'])
print "\n-----\nNumber of rows in resources list: ", nres
for n in range(0, nres):
langID = data['resources'][n]['id']
lon = data['resources'][n]['longitude']
lat = data['resources'][n]['latitude']
name = data['resources'][n]['name']
url = 'http://glottolog.org/resource/languoid/id/' + langID + '.json' # now fetch languoid specific data
langresp = urllib2.urlopen(url)
langdata = json.load(langresp)
langtype = langdata['level'] # 'language' or other?
print "\n-----\nlanguoid ID: ", langID
if lon is not None:
(withgeoCount, maxclass) = withlatlong(langID, lon, lat, name, langdata, langtype, withgeoCount, maxclass) # only those resources for which there are lat/long coordinates
else:
nongeoCount = nonlatlong(nongeoCount, langID, name, langtype) # interested in non-lat/long languoids?
## [3] the end: print statements and print to file
finishUp()
| mit | Python |
|
15b7279b0437cf14c2d5657b99d037beb044949f | Convert JSON to TSV | ENCODE-DCC/WranglerScripts,ENCODE-DCC/WranglerScripts,ENCODE-DCC/WranglerScripts,ENCODE-DCC/WranglerScripts | JSON2TSV.py | JSON2TSV.py | #!/usr/bin/env python
# -*- coding: latin-1 -*-
''' Script to convert a JSON file to TSV. Adapted from http://kailaspatil.blogspot.com/2013/07/python-script-to-convert-json-file-into.html
'''
import fileinput
import json
import csv
import sys
EPILOG = ''' Usage: %(prog)s -i [input JSON file] > [output TSV file] '''
def main():
import argparse
parser = argparse.ArgumentParser(description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--infile', '-i', help="JSON file to convert to TSV")
args = parser.parse_args()
lines = []
if args.infile:
with open(args.infile, 'r') as f:
for line in f:
lines.append(line)
else:
        print >> sys.stderr, "Please supply an input JSON file with -i"
        sys.exit(1)
new_json = json.loads(''.join(lines))
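    # Build the union of keys across all records so the TSV header covers
    # fields that appear in only some rows.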
keys = {}
for i in new_json:
for k in i.keys():
keys[k] = 1
tab_out = csv.DictWriter(sys.stdout, fieldnames=keys.keys(), dialect='excel-tab')
tab_out.writeheader()
for row in new_json:
tab_out.writerow(row)
if __name__ == '__main__':
main() | mit | Python |
|
7707e65ed591b890d91bcb7bf22923b8c17a113a | Add tests from Gregor's PR | davidfischer/readthedocs.org,wijerasa/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,pombredanne/readthedocs.org,rtfd/readthedocs.org,SteveViss/readthedocs.org,stevepiercy/readthedocs.org,rtfd/readthedocs.org,stevepiercy/readthedocs.org,safwanrahman/readthedocs.org,clarkperkins/readthedocs.org,espdev/readthedocs.org,espdev/readthedocs.org,istresearch/readthedocs.org,davidfischer/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,tddv/readthedocs.org,wijerasa/readthedocs.org,istresearch/readthedocs.org,safwanrahman/readthedocs.org,tddv/readthedocs.org,stevepiercy/readthedocs.org,SteveViss/readthedocs.org,techtonik/readthedocs.org,rtfd/readthedocs.org,espdev/readthedocs.org,techtonik/readthedocs.org,espdev/readthedocs.org,clarkperkins/readthedocs.org,clarkperkins/readthedocs.org,techtonik/readthedocs.org,clarkperkins/readthedocs.org,istresearch/readthedocs.org,pombredanne/readthedocs.org,SteveViss/readthedocs.org,istresearch/readthedocs.org,SteveViss/readthedocs.org,tddv/readthedocs.org,wijerasa/readthedocs.org,pombredanne/readthedocs.org,techtonik/readthedocs.org,stevepiercy/readthedocs.org,espdev/readthedocs.org,wijerasa/readthedocs.org | readthedocs/rtd_tests/tests/test_api_permissions.py | readthedocs/rtd_tests/tests/test_api_permissions.py | from functools import partial
from mock import Mock
from unittest import TestCase
from readthedocs.restapi.permissions import APIRestrictedPermission
class APIRestrictedPermissionTests(TestCase):
def get_request(self, method, is_admin):
request = Mock()
request.method = method
request.user.is_staff = is_admin
return request
def assertAllow(self, handler, method, is_admin, obj=None):
if obj is None:
self.assertTrue(handler.has_permission(
request=self.get_request(method, is_admin=is_admin),
view=None))
else:
self.assertTrue(handler.has_object_permission(
request=self.get_request(method, is_admin=is_admin),
view=None,
obj=obj))
def assertDisallow(self, handler, method, is_admin, obj=None):
if obj is None:
self.assertFalse(handler.has_permission(
request=self.get_request(method, is_admin=is_admin),
view=None))
else:
self.assertFalse(handler.has_object_permission(
request=self.get_request(method, is_admin=is_admin),
view=None,
obj=obj))
def test_non_object_permissions(self):
handler = APIRestrictedPermission()
assertAllow = partial(self.assertAllow, handler, obj=None)
assertDisallow = partial(self.assertDisallow, handler, obj=None)
assertAllow('GET', is_admin=False)
assertAllow('HEAD', is_admin=False)
assertAllow('OPTIONS', is_admin=False)
assertDisallow('DELETE', is_admin=False)
assertDisallow('PATCH', is_admin=False)
assertDisallow('POST', is_admin=False)
assertDisallow('PUT', is_admin=False)
assertAllow('GET', is_admin=True)
assertAllow('HEAD', is_admin=True)
assertAllow('OPTIONS', is_admin=True)
assertAllow('DELETE', is_admin=True)
assertAllow('PATCH', is_admin=True)
assertAllow('POST', is_admin=True)
assertAllow('PUT', is_admin=True)
def test_object_permissions(self):
handler = APIRestrictedPermission()
obj = Mock()
assertAllow = partial(self.assertAllow, handler, obj=obj)
assertDisallow = partial(self.assertDisallow, handler, obj=obj)
assertAllow('GET', is_admin=False)
assertAllow('HEAD', is_admin=False)
assertAllow('OPTIONS', is_admin=False)
assertDisallow('DELETE', is_admin=False)
assertDisallow('PATCH', is_admin=False)
assertDisallow('POST', is_admin=False)
assertDisallow('PUT', is_admin=False)
assertAllow('GET', is_admin=True)
assertAllow('HEAD', is_admin=True)
assertAllow('OPTIONS', is_admin=True)
assertAllow('DELETE', is_admin=True)
assertAllow('PATCH', is_admin=True)
assertAllow('POST', is_admin=True)
assertAllow('PUT', is_admin=True)
| mit | Python |
|
44f70a0c8ea9613214ce6305c262a8508b4bc598 | create add_user.py | AlexLamson/rpi-greeter | add_user.py | add_user.py | #!/usr/bin/python
import bluetooth
print("Scanning for bluetooth devices in discoverable mode...")
nearby_devices = bluetooth.discover_devices(lookup_names = True)
for i, (addr, name) in enumerate(nearby_devices):
print("[{}] {} {}".format(i, addr, name))
num = raw_input("Enter the number of your device (or type anything else to quit)\n")
if num.isdigit() and 0 <= int(num) < len(nearby_devices):
addr, name = nearby_devices[int(num)]
maybe_name = raw_input("Enter a name for this device (or press enter to use '{}')\n".format(name))
if maybe_name != '':
name = maybe_name
with open("users.txt", "a") as users_file:
users_file.write("{} {}\n".format(addr, name))
print("Successfully added '{}'".format(name))
else:
exit()
| mit | Python |