commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
b3ef51e93b090451718ed4c1240b63b8e99cd085 | rename example | scottprahl/miepython | miepython/02_glass.py | miepython/02_glass.py | #!/usr/bin/env python3
"""
Plot the scattering efficiency as a function of wavelength for 4 micron glass spheres
"""
import numpy as np
import matplotlib.pyplot as plt
import miepython
num = 100
radius = 2 # in microns
lam = np.linspace(0.2,1.2,num) # also in microns
x = 2*np.pi*radius/lam
# from https://refractiveindex.info/?shelf=glass&book=BK7&page=SCHOTT
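# (this implements the Sellmeier dispersion relation for BK7,
#  n**2 = 1 + sum_i B_i*lam**2/(lam**2 - C_i), with lam in microns;
#  note B/(1 - C/lam**2) == B*lam**2/(lam**2 - C))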
m=np.sqrt(1+1.03961212/(1-0.00600069867/lam**2)+0.231792344/(1-0.0200179144/lam**2)+1.01046945/(1-103.560653/lam**2))
qqsca = np.zeros(num)
for i in range(num):
qext, qsca, qabs, qback, g = miepython.mie(m[i],x[i])
qqsca[i]=qsca
plt.plot(lam*1000,qqsca)
plt.title("BK7 glass spheres 4 micron diameter")
plt.xlabel("Wavelength (nm)")
plt.ylabel("Scattering Efficiency (-)")
plt.show()
| mit | Python |
|
d1fe5a06f5e082fd8196f510e2eba7daa3468ef8 | Add duplicate_nodes.py file | wjkoh/cs262a,wjkoh/cs262a,wjkoh/cs262a | duplicate_nodes.py | duplicate_nodes.py | from shutil import copytree, ignore_patterns
import glob
import os
import sys
if __name__ == '__main__':
data_dir = './parsedData/'
use_symlink = True
orig_nodes = os.listdir(data_dir)
orig_nodes = [os.path.basename(i) for i in glob.glob(os.path.join(data_dir, '1*'))]
for dup_cnt in range(100):
for orig_node in orig_nodes:
src = os.path.join(data_dir, orig_node)
dst = os.path.join(data_dir, 'd%s_%04d' % (orig_node, dup_cnt))
if use_symlink:
src = os.path.relpath(src, data_dir)
os.symlink(src, dst)
else:
copytree(src, dst)
| bsd-3-clause | Python |
|
77f812f76966b90c27131fd65968f548afcdcace | Add loader for basic csv layers without geoms | gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis | svir/dialogs/load_basic_csv_as_layer_dialog.py | svir/dialogs/load_basic_csv_as_layer_dialog.py | # -*- coding: utf-8 -*-
# /***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2018 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
from svir.utilities.utils import import_layer_from_csv
from svir.utilities.shared import OQ_BASIC_CSV_TO_LAYER_TYPES
from svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog
class LoadBasicCsvAsLayerDialog(LoadOutputAsLayerDialog):
"""
Modal dialog to load as layer a basic csv with no geometries, to be
browsed through its attribute table
"""
def __init__(self, iface, viewer_dock, session, hostname, calc_id,
output_type, path=None, mode=None):
assert output_type in OQ_BASIC_CSV_TO_LAYER_TYPES, output_type
LoadOutputAsLayerDialog.__init__(
self, iface, viewer_dock, session, hostname, calc_id,
output_type, path, mode)
self.create_file_size_indicator()
self.setWindowTitle('Load %s from CSV, as layer' % output_type)
self.populate_out_dep_widgets()
self.adjustSize()
self.set_ok_button()
def set_ok_button(self):
self.ok_button.setEnabled(bool(self.path))
def populate_out_dep_widgets(self):
self.show_file_size()
def load_from_csv(self):
if self.mode == 'testing':
dest_shp = tempfile.mkstemp(suffix='.shp')[1]
else:
dest_shp = None # the destination file will be selected via GUI
csv_path = self.path_le.text()
# extract the name of the csv file and remove the extension
layer_name = os.path.splitext(os.path.basename(csv_path))[0]
self.layer = import_layer_from_csv(
self, csv_path, layer_name, self.iface,
save_as_shp=False, dest_shp=dest_shp,
zoom_to_layer=False, has_geom=False)
| agpl-3.0 | Python |
|
f2a359664bf69a6c8e883d460a49c986b511b80e | add file | EuroPython/ep-tools,PythonSanSebastian/ep-tools,PythonSanSebastian/ep-tools,PythonSanSebastian/ep-tools,EuroPython/ep-tools,EuroPython/ep-tools,EuroPython/ep-tools,PythonSanSebastian/ep-tools | eptools/gspread.py | eptools/gspread.py | """
Functions to access the data in google drive spreadsheets
"""
import pandas as pd
from docstamp.gdrive import (get_spreadsheet,
worksheet_to_dict)
def get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None, start_row=1):
""" Return the content of the spreadsheet in the ws_tab_idx tab of
the spreadsheet with doc_key as a pandas DataFrame.
Parameters
----------
api_key_file: str
Path to the Google API key json file.
doc_key: str
ws_tab_idx: int
Index of the worksheet within the spreadsheet.
header: List[str]
List of values to assign to the header of the result.
start_row: int
Row index from where to start collecting the data.
Returns
-------
content: pandas.DataFrame
"""
spread = get_spreadsheet(api_key_file, doc_key)
ws = spread.get_worksheet(ws_tab_idx)
ws_dict = worksheet_to_dict(ws, header=header, start_row=start_row)
return pd.DataFrame(ws_dict)
def find_one_row(substr, df, col_name):
""" Return one row from `df`. The returned row has in `col_name` column
a value with a sub-string as `substr.
Raise KeyError if no row is found.
"""
for name in df[col_name]:
if substr.lower() in name.lower():
return df[df[col_name] == name]
raise KeyError('Could not find {} in the '
'pandas dataframe.'.format(substr))
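# Illustrative usage (hypothetical key file, spreadsheet key and column name,
# added for clarity; not part of the original commit):
#
#   talks = get_ws_data('api_key.json', '1AbCdEfG...', ws_tab_idx=0)
#   speaker_row = find_one_row('keynote', talks, col_name='title')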
| mit | Python |
|
e8d05226f2a8cabf0f38bae6c2e218bd81efa6a1 | Add a utility script for encoding packet traces | haowu4682/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5,haowu4682/gem5,LingxiaoJIA/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5 | util/encode_packet_trace.py | util/encode_packet_trace.py | #!/usr/bin/env python
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
#
# This script is used to migrate ASCII packet traces to the protobuf
# format currently used in gem5. It assumes that protoc has been
# executed and already generated the Python package for the packet
# messages. This can be done manually using:
# protoc --python_out=. --proto_path=src/proto src/proto/packet.proto
#
# The ASCII trace format uses one line per request on the format cmd,
# addr, size, tick. For example:
# r,128,64,4000
# w,232123,64,500000
# This trace reads 64 bytes from decimal address 128 at tick 4000,
# then writes 64 bytes to address 232123 at tick 500000.
#
# This script can of course also be used as a template to convert
# other trace formats into the gem5 protobuf format
import struct
import sys
import packet_pb2
def EncodeVarint(out_file, value):
"""
The encoding of the Varint32 is copied from
google.protobuf.internal.encoder and is only repeated here to
avoid depending on the internal functions in the library.
"""
bits = value & 0x7f
value >>= 7
while value:
out_file.write(struct.pack('<B', 0x80|bits))
bits = value & 0x7f
value >>= 7
out_file.write(struct.pack('<B', bits))
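# Worked example (added for illustration): EncodeVarint(f, 300) writes the two
# bytes 0xac 0x02, because 300 = 0b100101100 splits into the 7-bit groups
# 0101100 and 10, emitted least-significant group first with the continuation
# bit (0x80) set on every byte except the last.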
def encodeMessage(out_file, message):
"""
Encode a message with its length prepended as a 32-bit varint.
"""
out = message.SerializeToString()
EncodeVarint(out_file, len(out))
out_file.write(out)
def main():
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], " <ASCII input> <protobuf output>"
exit(-1)
try:
ascii_in = open(sys.argv[1], 'r')
except IOError:
print "Failed to open ", sys.argv[1], " for reading"
exit(-1)
try:
proto_out = open(sys.argv[2], 'wb')
except IOError:
print "Failed to open ", sys.argv[2], " for writing"
exit(-1)
# Write the magic number in 4-byte Little Endian, similar to what
# is done in src/proto/protoio.cc
proto_out.write("gem5")
# Add the packet header
header = packet_pb2.PacketHeader()
header.obj_id = "Converted ASCII trace " + sys.argv[1]
# Assume the default tick rate
header.tick_freq = 1000000000
encodeMessage(proto_out, header)
# For each line in the ASCII trace, create a packet message and
# write it to the encoded output
for line in ascii_in:
cmd, addr, size, tick = line.split(',')
packet = packet_pb2.Packet()
packet.tick = long(tick)
# ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum
packet.cmd = 1 if cmd == 'r' else 4
packet.addr = long(addr)
packet.size = int(size)
encodeMessage(proto_out, packet)
# We're done
ascii_in.close()
proto_out.close()
if __name__ == "__main__":
main()
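# Illustrative run (added for clarity; file names are hypothetical). Given an
# ASCII trace in the format described in the header comment, e.g. a file
# sample.trc containing:
#   r,128,64,4000
#   w,232123,64,500000
# the conversion is invoked as:
#   python encode_packet_trace.py sample.trc sample.proto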
| bsd-3-clause | Python |
|
04876b4bea96f983c722cb9bf7845c7cc3b0ecef | add oauth2 example | ikvk/imap_tools | examples/oauth2.py | examples/oauth2.py | from imap_tools import MailBox
# Authenticate to account using OAuth 2.0 mechanism
with MailBox('imap.my.ru').xoauth2('user', 'token123', 'INBOX') as mailbox:
for msg in mailbox.fetch():
print(msg.date_str, msg.subject)
| apache-2.0 | Python |
|
c9a0fb540a9ee8005c1ee2d70613c39455891bee | Add analyze_bound_horizontal tests module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/test_analyze_bound_horizontal.py | tests/plantcv/test_analyze_bound_horizontal.py | import pytest
import cv2
from plantcv.plantcv import analyze_bound_horizontal, outputs
@pytest.mark.parametrize('pos,exp', [[200, 58], [-1, 0], [100, 0], [150, 11]])
def test_analyze_bound_horizontal(pos, exp, test_data):
# Clear previous outputs
outputs.clear()
# Read in test data
img = cv2.imread(test_data.small_rgb_img)
# img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
mask = cv2.imread(test_data.small_bin_img, -1)
object_contours = test_data.load_composed_contours(test_data.small_composed_contours_file)
# _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300)
# _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)
# _ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)
# _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
_ = analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=pos)
assert outputs.observations["default"]["height_above_reference"]["value"] == exp
def test_analyze_bound_horizontal_grayscale_image(test_data):
# Read in test data
img = cv2.imread(test_data.small_gray_img, -1)
mask = cv2.imread(test_data.small_bin_img, -1)
object_contours = test_data.load_composed_contours(test_data.small_composed_contours_file)
boundary_img = analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=200)
assert len(boundary_img.shape) == 3
| mit | Python |
|
198cf78895db88a8986926038e817ebb2bf75eb2 | add migration for notification tables | uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal | portal/migrations/versions/458dd2fc1172_.py | portal/migrations/versions/458dd2fc1172_.py | from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 458dd2fc1172
Revises: 8ecdd6381235
Create Date: 2017-12-21 16:38:49.659073
"""
# revision identifiers, used by Alembic.
revision = '458dd2fc1172'
down_revision = '8ecdd6381235'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('notifications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user_notifications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('notification_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['notification_id'], ['notifications.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id', 'notification_id', name='_user_notification')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user_notifications')
op.drop_table('notifications')
# ### end Alembic commands ###
| bsd-3-clause | Python |
|
8c7fa4e16805dc9e8adbd5615c610be8ba92c444 | Add argparse tests for gatherkeys | SUSE/ceph-deploy,zhouyuan/ceph-deploy,shenhequnying/ceph-deploy,ghxandsky/ceph-deploy,zhouyuan/ceph-deploy,ceph/ceph-deploy,SUSE/ceph-deploy,branto1/ceph-deploy,isyippee/ceph-deploy,osynge/ceph-deploy,Vicente-Cheng/ceph-deploy,isyippee/ceph-deploy,ceph/ceph-deploy,trhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,SUSE/ceph-deploy-to-be-deleted,Vicente-Cheng/ceph-deploy,ghxandsky/ceph-deploy,imzhulei/ceph-deploy,codenrhoden/ceph-deploy,shenhequnying/ceph-deploy,codenrhoden/ceph-deploy,trhoden/ceph-deploy,osynge/ceph-deploy,branto1/ceph-deploy,imzhulei/ceph-deploy | ceph_deploy/tests/parser/test_gatherkeys.py | ceph_deploy/tests/parser/test_gatherkeys.py | import pytest
from ceph_deploy.cli import get_parser
class TestParserGatherKeys(object):
def setup(self):
self.parser = get_parser()
def test_gather_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('gatherkeys --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy gatherkeys' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_gatherkeys_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('gatherkeys'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_gatherkeys_one_host(self):
args = self.parser.parse_args('gatherkeys host1'.split())
assert args.mon == ['host1']
def test_gatherkeys_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['gatherkeys'] + hostnames)
assert args.mon == hostnames
| mit | Python |
|
2803b237af18c6d5cd0613eaf4eccf2b61e65100 | Create afImgPanel.py | aaronfang/personal_scripts | scripts/afImgPanel.py | scripts/afImgPanel.py | import pymel.core as pm
import pymel.all as pa
imgOp = 0.3
imgDep = 10
#get current camera
curCam = pm.modelPanel(pm.getPanel(wf=True),q=True,cam=True)
#select image and creat imagePlane and setup
fileNm = pm.fileDialog2(ds=0,fm=1,cap='open',okc='Select Image')
ImgPln = pm.imagePlane(fn=fileNm[0],lookThrough=curCam,maintainRatio=1)
pm.setAttr(ImgPln[1]+'.displayOnlyIfCurrent',True)
pm.setAttr(ImgPln[0]+'.translateZ',-pm.getAttr(curCam+'.translateZ')/3+-imgDep)
pm.setAttr(ImgPln[1]+'.alphaGain',imgOp)
pm.setAttr(ImgPln[1]+'.textureFilter',1)
#aligh to the camera
#create locator to be the parent and then create parent constraint
pLoc = pm.spaceLocator()
pm.parent(ImgPln[0],pLoc)
pm.parentConstraint(curCam,pLoc)
#Toggle image plane visibility
if(pm.getAttr(ImgPln[1]+'.visibility')):
pm.setAttr(ImgPln[1]+'.visibility',0)
else:
pm.setAttr(ImgPln[1]+'.visibility',1)
| mit | Python |
|
f24fe32329625ec037a9afc8d3bdeed5f41e69a0 | Add a script for easy diffing of two Incars. | gpetretto/pymatgen,aykol/pymatgen,Bismarrck/pymatgen,czhengsci/pymatgen,johnson1228/pymatgen,Bismarrck/pymatgen,gVallverdu/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,czhengsci/pymatgen,dongsenfo/pymatgen,montoyjh/pymatgen,czhengsci/pymatgen,fraricci/pymatgen,nisse3000/pymatgen,nisse3000/pymatgen,blondegeek/pymatgen,nisse3000/pymatgen,richardtran415/pymatgen,tschaume/pymatgen,fraricci/pymatgen,matk86/pymatgen,mbkumar/pymatgen,czhengsci/pymatgen,dongsenfo/pymatgen,tallakahath/pymatgen,blondegeek/pymatgen,johnson1228/pymatgen,Bismarrck/pymatgen,dongsenfo/pymatgen,Bismarrck/pymatgen,vorwerkc/pymatgen,gpetretto/pymatgen,johnson1228/pymatgen,vorwerkc/pymatgen,montoyjh/pymatgen,xhqu1981/pymatgen,mbkumar/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,matk86/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,tschaume/pymatgen,matk86/pymatgen,gpetretto/pymatgen,setten/pymatgen,richardtran415/pymatgen,setten/pymatgen,tallakahath/pymatgen,ndardenne/pymatgen,gVallverdu/pymatgen,tschaume/pymatgen,ndardenne/pymatgen,montoyjh/pymatgen,mbkumar/pymatgen,gmatteo/pymatgen,vorwerkc/pymatgen,richardtran415/pymatgen,xhqu1981/pymatgen,montoyjh/pymatgen,davidwaroquiers/pymatgen,matk86/pymatgen,nisse3000/pymatgen,Bismarrck/pymatgen,fraricci/pymatgen,davidwaroquiers/pymatgen,tallakahath/pymatgen,tschaume/pymatgen,gpetretto/pymatgen,xhqu1981/pymatgen,ndardenne/pymatgen,setten/pymatgen,gmatteo/pymatgen,davidwaroquiers/pymatgen,gVallverdu/pymatgen,setten/pymatgen,aykol/pymatgen,dongsenfo/pymatgen,mbkumar/pymatgen,blondegeek/pymatgen,aykol/pymatgen,johnson1228/pymatgen,tschaume/pymatgen | scripts/diff_incar.py | scripts/diff_incar.py | #!/usr/bin/env python
'''
Created on Nov 12, 2011
'''
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned
filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
| mit | Python |
|
7e2170feef60866b8938595f674ae4dd70c5cc46 | Add benchmark for F.transpose() | sony/nnabla,sony/nnabla,sony/nnabla | python/benchmark/function/test_transpose.py | python/benchmark/function/test_transpose.py | # Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla.functions as F
from function_benchmark import FunctionBenchmark, Inspec
def inspecs_params():
inspecs = []
# Reported bad performance cases
# These three cases were optimized well by cuTENSOR.
inspecs.append(([Inspec((32, 144, 28, 1))], (0, 1, 3, 2)))
inspecs.append(([Inspec((32, 144, 28, 3))], (0, 1, 3, 2)))
inspecs.append(([Inspec((768, 50, 50))], (0, 2, 1)))
# From ResNet-50
# Input side
inspecs.append(([Inspec((192, 224, 224, 3))], (0, 3, 1, 2)))
inspecs.append(([Inspec((192, 3, 224, 224))], (0, 2, 3, 1)))
# Output side
inspecs.append(([Inspec((192, 3, 3, 512))], (0, 3, 1, 2)))
inspecs.append(([Inspec((192, 512, 3, 3))], (0, 2, 3, 1)))
inspecs.append(([Inspec((192, 1, 1, 2048))], (0, 3, 1, 2)))
inspecs.append(([Inspec((192, 2048, 1, 1))], (0, 2, 3, 1)))
# Single input
inspecs.append(([Inspec((1, 224, 224, 3))], (0, 3, 1, 2)))
inspecs.append(([Inspec((1, 3, 224, 224))], (0, 2, 3, 1)))
inspecs.append(([Inspec((1, 3, 3, 512))], (0, 3, 1, 2)))
inspecs.append(([Inspec((1, 512, 3, 3))], (0, 2, 3, 1)))
inspecs.append(([Inspec((1, 1, 1, 2048))], (0, 3, 1, 2)))
inspecs.append(([Inspec((1, 2048, 1, 1))], (0, 2, 3, 1)))
# Other
# 2D
inspecs.append(([Inspec((64, 64))], (1, 0)))
inspecs.append(([Inspec((1024, 1024))], (1, 0)))
# 4D
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 1, 2, 3)))
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 1, 3, 2)))
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 3, 2, 1)))
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 2, 1, 3)))
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 3, 1, 2)))
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 2, 3, 1)))
# 4D misaligned
inspecs.append(([Inspec((65, 65, 65, 65))], (0, 1, 2, 3)))
inspecs.append(([Inspec((65, 65, 65, 65))], (0, 1, 3, 2)))
inspecs.append(([Inspec((65, 65, 65, 65))], (0, 3, 2, 1)))
inspecs.append(([Inspec((65, 65, 65, 65))], (0, 2, 1, 3)))
inspecs.append(([Inspec((65, 65, 65, 65))], (0, 3, 1, 2)))
inspecs.append(([Inspec((65, 65, 65, 65))], (0, 2, 3, 1)))
return inspecs
@pytest.mark.parametrize('inspecs, axis', inspecs_params())
def test_transpose(inspecs, axis, nnabla_opts):
fb = FunctionBenchmark(
F.transpose, inspecs, [axis], dict(),
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
| apache-2.0 | Python |
|
c36ae47bee44ff8aa8eaf17f8ded88192d7a6573 | implement query term search | anfego/search_IR | queryAnswer.py | queryAnswer.py | import pickle
# Loads the posting Index
index = open("posIndex.dat", "rb");
posIndex = pickle.load(index);
print posIndex['made'];
query = "Juan made of youtube"
# query = raw_input('Please enter your query: ');
queryTerms = ' '.join(query.split());
queryTerms = queryTerms.split(' ');
k = len(queryTerms);
print (queryTerms);
i = 0;
for term in queryTerms:
    term = term.lower();
    queryTerms[i] = term;
    if term in posIndex:
        print "%s -->\t %s\n" % (term, posIndex[term]);
    else:
        print "%s -->\n" % (term);
    i = i + 1;
| mit | Python |
|
625d250c7eabcf48292590a6b0ca57f1b3cc7c49 | Add meshprocessing scratch | unidesigner/unidesign | scratch/meshprocessing.py | scratch/meshprocessing.py | import networkx as nx
from time import time
import numpy as np
def mesh2graph(faces):
""" Converts a triangular mesh to a graph only taking
the connectivity into account """
g = nx.Graph()
for i in range(len(faces)):
g.add_edge(faces[i,0], faces[i,1])
g.add_edge(faces[i,1], faces[i,2])
return g
def graphlaplacian(g):
import scipy.sparse as sp
# scipy.sparse.linalg.eigen
n = g.order()
D = sp.identity(n)
A = nx.to_scipy_sparse_matrix(g)
di = A.sum(axis=1).T.tolist()[0]
D.setdiag(di)
L = D - A
return L
def grapheigendecomposition(graphlaplacian, k = 3):
""" k is the number of eigenvalues desired
See http://docs.scipy.org/doc/scipy/reference/sparse.linalg.html
"""
from scipy.sparse.linalg.eigen import lobpcg
guess = np.random.rand(graphlaplacian.shape[0],k) * 100
return lobpcg(graphlaplacian, guess)
if __name__ == '__main__':
faces = np.array([ [0,1,2],
[1,2,3]], dtype = np.uint)
start = time()
import nibabel.gifti as gi
a=gi.read('/home/stephan/Dev/PyWorkspace/connectomeviewer/cviewer/resources/atlases/template_atlas_homo_sapiens_01/Gifti/fsaverage.gii')
faces = a.darrays[1].data[:100,:]
print "Loading took ", time()-start
g = mesh2graph(faces)
print "Making graph ", time()-start
gl = graphlaplacian(g)
print "Getting laplacian ", time()-start
w,v = grapheigendecomposition(gl, k = 3)
# Ev, Evect = eig(gl)
print w
print "Getting eigendecomposition ", time()-start
from scipy.linalg import eig, eigh
Ev, Evect = eigh(gl.todense())
print Ev
#print np.real(Ev)
| bsd-3-clause | Python |
|
6a13511db8401a17a5c6feb7071af821211c2836 | Create sitemap urls | opps/opps,williamroot/opps,opps/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,williamroot/opps,opps/opps,opps/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,YACOWS/opps,jeanmask/opps | opps/sitemaps/urls.py | opps/sitemaps/urls.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from django.contrib.sitemaps import views as sitemap_views
from opps.sitemaps.sitemaps import GenericSitemap, InfoDisct
sitemaps = {
'articles': GenericSitemap(InfoDisct(), priority=0.6),
}
sitemaps_googlenews = {
'articles': GenericSitemap(InfoDisct(True), priority=0.6),
}
urlpatterns = patterns(
'',
url(r'^\.xml$', sitemap_views.index,
{'sitemaps': sitemaps}),
url(r'^-googlenews\.xml$', sitemap_views.sitemap,
{'sitemaps': sitemaps_googlenews,
'template_name': 'sitemap_googlenews.xml'}),
url(r'^-(?P<section>.+)\.xml$', sitemap_views.sitemap,
{'sitemaps': sitemaps}),
)
| mit | Python |
|
04d122d88bb9f71843df924e048b12de1976b847 | Add missing migration | keybar/keybar | src/keybar/migrations/0008_entry_salt.py | src/keybar/migrations/0008_entry_salt.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('keybar', '0007_remove_entry_key'),
]
operations = [
migrations.AddField(
model_name='entry',
name='salt',
field=models.BinaryField(null=True, blank=True),
preserve_default=True,
),
]
| bsd-3-clause | Python |
|
531da297c57c7b359c37a743095c10e7ad0592cf | Add test_container | laike9m/pdir2 | tests/test_container.py | tests/test_container.py | import pdir
def test_acting_like_a_list():
dadada = 1
cadada = 1
vadada = 1
apple1 = 1
xapple2 = 1
result, correct = pdir(), dir()
assert len(correct) == len(result)
for x, y in zip(correct, result):
assert x == y
def test_acting_like_a_list_when_search():
dadada = 1
cadada = 1
vadada = 1
apple1 = 1
xapple2 = 1
result = pdir().s('apple')
assert len(result) == 2
assert list(result) == ['apple1', 'xapple2']
| mit | Python |
|
79ebedc800c31b47bd0cc340de06dafcd6ade7f9 | Add TwrOauth basic test | tchx84/twitter-gobject | tests/test_twr_oauth.py | tests/test_twr_oauth.py | #!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
from gi.repository import GObject
sys.path.append("..")
from twitter.twr_oauth import TwrOauth
from twitter.twr_account import TwrAccount
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
TwrAccount.set_secrets(consumer_key, consumer_secret,
access_key, access_secret)
def __phase2_failed_cb(oauth, info):
print '[FAILED] phase2: access-downloaded-failed, with %s' % info
loop.quit()
def __phase1_failed_cb(oauth, info):
print '[FAILED] phase1: request-downloaded-failed, with %s' % info
loop.quit()
def __phase2_cb(oauth, info):
print '[OK] phase2: access-downloaded, with %s' % info
TwrAccount.set_secrets(consumer_key, consumer_secret,
info['oauth_token'], info['oauth_token_secret'])
loop.quit()
def __phase1_cb(oauth, info):
print '[OK] phase1: request-downloaded'
url = TwrOauth.AUTHORIZATION_URL % info['oauth_token']
print 'Please visit %s' % url
verifier = raw_input('verifier: ')
TwrAccount.set_secrets(consumer_key, consumer_secret,
info['oauth_token'], info['oauth_token_secret'])
oauth.connect('access-downloaded', __phase2_cb)
oauth.connect('access-downloaded-failed', __phase2_failed_cb)
oauth.access_token(verifier)
oauth = TwrOauth()
oauth.connect('request-downloaded', __phase1_cb)
oauth.connect('request-downloaded-failed', __phase1_failed_cb)
oauth.request_token()
loop = GObject.MainLoop()
loop.run()
| lgpl-2.1 | Python |
|
d15c8eaca5fb115b8600a8e743ae73a9edba9a5b | Initialize P04_datetimeModule | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter15/P04_datetimeModule.py | books/AutomateTheBoringStuffWithPython/Chapter15/P04_datetimeModule.py | # This program uses the datetime module to manipulate dates and times.
# The datetime Module
import datetime, time
print(datetime.datetime.now())
dt = datetime.datetime(2015, 10, 21, 16, 29, 0)
print((dt.year, dt.month, dt.day))
print((dt.hour, dt.minute, dt.second))
print(datetime.datetime.fromtimestamp(1000000))
print(datetime.datetime.fromtimestamp(time.time()))
halloween2015 = datetime.datetime(2015, 10, 31, 0, 0, 0)
newyears2016 = datetime.datetime(2016, 1, 1, 0, 0, 0)
oct31_2015 = datetime.datetime(2015, 10, 31, 0, 0, 0)
print(halloween2015 == oct31_2015)
print(halloween2015 > newyears2016)
print(newyears2016 > halloween2015)
print(newyears2016 != oct31_2015)
| mit | Python |
|
8106c22a5c05f438eb9c6436af054fd1e72b103c | Add SK_IGNORE_FASTER_TEXT_FIX define for staging Skia change. | XiaosongWei/blink-crosswalk,XiaosongWei/blink-crosswalk,modulexcite/blink,modulexcite/blink,jtg-gg/blink,nwjs/blink,nwjs/blink,smishenk/blink-crosswalk,Bysmyyr/blink-crosswalk,XiaosongWei/blink-crosswalk,ondra-novak/blink,hgl888/blink-crosswalk-efl,ondra-novak/blink,crosswalk-project/blink-crosswalk-efl,Pluto-tv/blink-crosswalk,kurli/blink-crosswalk,modulexcite/blink,smishenk/blink-crosswalk,smishenk/blink-crosswalk,kurli/blink-crosswalk,PeterWangIntel/blink-crosswalk,ondra-novak/blink,ondra-novak/blink,Bysmyyr/blink-crosswalk,hgl888/blink-crosswalk-efl,jtg-gg/blink,PeterWangIntel/blink-crosswalk,ondra-novak/blink,hgl888/blink-crosswalk-efl,Bysmyyr/blink-crosswalk,jtg-gg/blink,crosswalk-project/blink-crosswalk-efl,PeterWangIntel/blink-crosswalk,crosswalk-project/blink-crosswalk-efl,smishenk/blink-crosswalk,Bysmyyr/blink-crosswalk,smishenk/blink-crosswalk,jtg-gg/blink,nwjs/blink,jtg-gg/blink,modulexcite/blink,ondra-novak/blink,modulexcite/blink,PeterWangIntel/blink-crosswalk,Bysmyyr/blink-crosswalk,Pluto-tv/blink-crosswalk,smishenk/blink-crosswalk,crosswalk-project/blink-crosswalk-efl,smishenk/blink-crosswalk,hgl888/blink-crosswalk-efl,crosswalk-project/blink-crosswalk-efl,XiaosongWei/blink-crosswalk,ondra-novak/blink,modulexcite/blink,Pluto-tv/blink-crosswalk,jtg-gg/blink,crosswalk-project/blink-crosswalk-efl,Pluto-tv/blink-crosswalk,jtg-gg/blink,PeterWangIntel/blink-crosswalk,Bysmyyr/blink-crosswalk,nwjs/blink,hgl888/blink-crosswalk-efl,nwjs/blink,modulexcite/blink,XiaosongWei/blink-crosswalk,modulexcite/blink,crosswalk-project/blink-crosswalk-efl,Bysmyyr/blink-crosswalk,hgl888/blink-crosswalk-efl,jtg-gg/blink,PeterWangIntel/blink-crosswalk,PeterWangIntel/blink-crosswalk,Pluto-tv/blink-crosswalk,kurli/blink-crosswalk,modulexcite/blink,nwjs/blink,crosswalk-project/blink-crosswalk-efl,nwjs/blink,hgl888/blink-crosswalk-efl,PeterWangIntel/blink-crosswalk,PeterWangIntel/blink-crosswalk,jtg-gg/blink,kurli/blink-crosswalk,Bysmyyr/blink-crosswalk,smishenk/blink-crosswalk,Bysmyyr/blink-crosswalk,nwjs/blink,jtg-gg/blink,hgl888/blink-crosswalk-efl,nwjs/blink,PeterWangIntel/blink-crosswalk,XiaosongWei/blink-crosswalk,modulexcite/blink,smishenk/blink-crosswalk,smishenk/blink-crosswalk,XiaosongWei/blink-crosswalk,kurli/blink-crosswalk,hgl888/blink-crosswalk-efl,Pluto-tv/blink-crosswalk,hgl888/blink-crosswalk-efl,kurli/blink-crosswalk,kurli/blink-crosswalk,Pluto-tv/blink-crosswalk,ondra-novak/blink,kurli/blink-crosswalk,XiaosongWei/blink-crosswalk,XiaosongWei/blink-crosswalk,ondra-novak/blink,kurli/blink-crosswalk,kurli/blink-crosswalk,nwjs/blink,Pluto-tv/blink-crosswalk,crosswalk-project/blink-crosswalk-efl,Pluto-tv/blink-crosswalk,XiaosongWei/blink-crosswalk,Bysmyyr/blink-crosswalk,Pluto-tv/blink-crosswalk | public/blink_skia_config.gyp | public/blink_skia_config.gyp | #
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This target is a dependency of Chromium's skia/skia_library.gyp.
# It only contains code suppressions which keep Webkit tests from failing.
{
'targets': [
{
'target_name': 'blink_skia_config',
'type': 'none',
'direct_dependent_settings': {
'defines': [
# Place defines here that require significant Blink rebaselining, or that
# are otherwise best removed in Blink and then rolled into Chromium.
# Defines should be in single quotes and a comma must appear after every one.
# DO NOT remove the define until you are ready to rebaseline, and
# AFTER the flag has been removed from skia.gyp in Chromium.
'SK_DEFERRED_CANVAS_USES_FACTORIES=1',
'SK_IGNORE_FASTER_TEXT_FIX',
],
},
},
],
}
| #
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This target is a dependency of Chromium's skia/skia_library.gyp.
# It only contains code suppressions which keep Webkit tests from failing.
{
'targets': [
{
'target_name': 'blink_skia_config',
'type': 'none',
'direct_dependent_settings': {
'defines': [
# Place defines here that require significant Blink rebaselining, or that
# are otherwise best removed in Blink and then rolled into Chromium.
# Defines should be in single quotes and a comma must appear after every one.
# DO NOT remove the define until you are ready to rebaseline, and
# AFTER the flag has been removed from skia.gyp in Chromium.
'SK_DEFERRED_CANVAS_USES_FACTORIES=1',
],
},
},
],
}
| bsd-3-clause | Python |
f7586e8009ae9d2cfdc471b7dbdc9cf5d171c53b | Create string2.py | laetrid/learning | google/string2.py | google/string2.py | #!/usr/bin/env python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
if len(s) >= 3:
if s[-3:] == 'ing':
s = s + 'ly'
else:
s = s + 'ing'
return s
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
if s.find('not') < s.find('bad'):
left_part = s[:s.find('not')]
right_part = s[s.find('bad') + 3:]
s = left_part + 'good' + right_part
return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
div_a = len(a) / 2 + len(a) % 2
div_b = len(b) / 2 + len(b) % 2
result = a[:div_a] + b[:div_b] + a[div_a:] + b[div_b:]
return result
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
bb940826d78e44a4098023e83d788b3d915b9b1f | Revert "Add the GitHub-supported format extensions." | jbarreras/grip,joeyespo/grip,mgoddard-pivotal/grip,joeyespo/grip,mgoddard-pivotal/grip,ssundarraj/grip,jbarreras/grip,ssundarraj/grip | grip/constants.py | grip/constants.py | # The supported extensions, as defined by https://github.com/github/markup
supported_extensions = ['.md', '.markdown']
# The default filenames when no file is provided
default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
| # The supported extensions, as defined by https://github.com/github/markup
supported_extensions = [
'.markdown', '.mdown', '.mkdn', '.md',
'.textile',
'.rdoc',
'.org',
'.creole',
'.mediawiki', '.wiki',
'.rst',
'.asciidoc', '.adoc', '.asc',
'.pod',
]
# The default filenames when no file is provided
default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
| mit | Python |
f5718764185ce1149ed291601e4fe28f9cd2be06 | add single list module for mini-stl (Python) | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | python/mini-stl/single_list.py | python/mini-stl/single_list.py | #!/usr/bin/python -e
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class SingleList(object):
class ListNode(object):
def __init__(self):
self.next = None
self.data = None
def __del__(self):
self.next = None
self.data = None
def __init__(self):
self.front_ = None
self.rear_ = None
self.size_ = 0
self.iter_ = None
def __del__(self):
self.clear()
self.iter_ = None
def __iter__(self):
self.iter_ = self.front_
return self
def next(self):
if self.iter_ == None:
raise StopIteration
else:
data = self.iter_.data
self.iter_ = self.iter_.next
return data
def clear(self):
while self.front_ != None:
node = self.front_
self.front_ = self.front_.next
del node
self.front_ = None
self.rear_ = None
self.size_ = 0
def empty(self):
return self.front_ == None
def size(self):
return self.size_
def push_back(self, x):
node = self.ListNode()
node.next = None
node.data = x
if self.front_ == None:
self.front_ = node
self.rear_ = node
else:
self.rear_.next = node
self.rear_ = node
self.size_ += 1
def push_front(self, x):
node = self.ListNode()
node.next = self.front_
node.data = x
if self.front_ == None:
self.rear_ = node
self.front_ = node
self.size_ += 1
def pop_front(self):
if self.front_ == None:
return
node = self.front_
self.front_ = self.front_.next
del node
self.size_ -= 1
def front(self):
if self.front_ == None:
return None
return self.front_.data
def back(self):
if self.rear_ == None:
return None
return self.rear_.data
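# Example usage (illustrative, not part of the original module):
#
#   lst = SingleList()
#   for v in (1, 2, 3):
#       lst.push_back(v)
#   lst.push_front(0)
#   print list(lst)                            # [0, 1, 2, 3]
#   lst.pop_front()
#   print lst.front(), lst.back(), lst.size()  # 1 3 3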
| bsd-2-clause | Python |
|
0be7f2fe05588d93eb478a4fa648d310055b3ce7 | Add experimental generation code to make drafts from raster images | storborg/pyweaving | pyweaving/generators/raster.py | pyweaving/generators/raster.py | from .. import Draft
from PIL import Image
def point_threaded(im, warp_color=(0, 0, 0), weft_color=(255, 255, 255),
shafts=40, max_float=8, repeats=2):
"""
Given an image, generate a point-threaded drawdown that attempts to
represent the image. Results in a drawdown with bilateral symmetry from a
non-symmetric source image.
"""
draft = Draft(num_shafts=shafts, liftplan=True)
im.thumbnail((shafts, im.size[1]), Image.ANTIALIAS)
im = im.convert('1')
w, h = im.size
assert w == shafts
warp_pattern_size = ((2 * shafts) - 2)
for __ in range(repeats):
for ii in range(warp_pattern_size):
if ii < shafts:
shaft = ii
else:
shaft = warp_pattern_size - ii
draft.add_warp_thread(color=warp_color, shaft=shaft)
imdata = im.getdata()
for __ in range(repeats):
for yy in range(h):
offset = yy * w
pick_shafts = set()
for xx in range(w):
pixel = imdata[offset + xx]
if not pixel:
pick_shafts.add(xx)
draft.add_weft_thread(color=weft_color, shafts=pick_shafts)
return draft
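# Illustrative usage (hypothetical image path, added for clarity):
#
#   im = Image.open('photo.png')
#   draft = point_threaded(im, shafts=40)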
| mit | Python |
|
de325dbe53bbd28eddcbbf188f2689474994249b | add migration for new version of storedmessages | geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx,geometalab/drf-utm-zone-info,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/drf-utm-zone-info,geometalab/osmaxx-frontend | osmaxx-py/osmaxx/third_party_apps/stored_messages/migrations/0002_message_url.py | osmaxx-py/osmaxx/third_party_apps/stored_messages/migrations/0002_message_url.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('stored_messages', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='message',
name='url',
field=models.URLField(blank=True, null=True),
),
]
| mit | Python |
|
1472011cb8cd323357626443f714284feedfed62 | add merge of ACIS provided data | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/climodat/use_acis.py | scripts/climodat/use_acis.py | """Use data provided by ACIS to replace climodat data"""
import requests
import sys
import psycopg2
import datetime
SERVICE = "http://data.rcc-acis.org/StnData"
def safe(val):
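    # ACIS value flags (assumed meanings, added for clarity): 'M' = missing,
    # 'S' = subsumed into a later multi-day accumulation, 'T' = trace amount.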
if val in ['M', 'S']:
return None
if val == 'T':
return 0.0001
try:
return float(val)
except:
print("failed to convert %s to float, using None" % (repr(val),))
return None
def main(station, acis_station):
table = "alldata_%s" % (station[:2],)
payload = {"sid": acis_station,
"sdate": "1850-01-01",
"edate": "2017-01-01",
"elems": "maxt,mint,pcpn,snow,snwd"}
req = requests.post(SERVICE, json=payload)
j = req.json()
pgconn = psycopg2.connect(database='coop', host='localhost', port=5555,
user='mesonet')
cursor = pgconn.cursor()
for row in j['data']:
date = row[0]
(high, low, precip, snow, snowd) = map(safe, row[1:])
if all([a is None for a in (high, low, precip, snow, snowd)]):
continue
cursor.execute("""
UPDATE """ + table + """ SET high = %s, low = %s, precip = %s,
snow = %s, snowd = %s WHERE station = %s and day = %s
""", (high, low, precip, snow, snowd, station, date))
if cursor.rowcount == 0:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
sday = "%02i%02i" % (date.month, date.day)
print("Adding entry for %s" % (date,))
cursor.execute("""INSERT into """ + table + """ (station, day,
high, low, precip, snow, snowd, sday, year, month, estimated)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'f')
""", (station, date, high, low, precip, snow, snowd, sday,
date.year, date.month))
cursor.close()
pgconn.commit()
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| mit | Python |
|
b333d95f3f4187b9d9b480ba8ff4985a62d65f41 | Add tests for nginx version | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/pytests/unit/modules/test_nginx.py | tests/pytests/unit/modules/test_nginx.py | import pytest
import salt.modules.nginx as nginx
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {nginx: {}}
@pytest.mark.parametrize(
"expected_version,nginx_output",
[
("1.2.3", "nginx version: nginx/1.2.3"),
("1", "nginx version: nginx/1"),
("9.1.100a1+abc123", "nginx version: nginx/9.1.100a1+abc123"),
(
"42.9.13.1111111111.whatever",
"nginx version: nginx/42.9.13.1111111111.whatever",
),
],
)
def test_basic_nginx_version_output(expected_version, nginx_output):
with patch.dict(nginx.__salt__, {"cmd.run": lambda *args, **kwargs: nginx_output}):
assert nginx.version() == expected_version
| apache-2.0 | Python |
|
e77b9a5dff36b3318759a18a786c7cc08bb8ac3e | Create Scramble_String.py | UmassJin/Leetcode | Array/Scramble_String.py | Array/Scramble_String.py | '''
Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.
Below is one possible representation of s1 = "great":
great
/ \
gr eat
/ \ / \
g r e at
/ \
a t
To scramble the string, we may choose any non-leaf node and swap its two children.
For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat".
rgeat
/ \
rg eat
/ \ / \
r g e at
/ \
a t
We say that "rgeat" is a scrambled string of "great".
Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae".
rgtae
/ \
rg tae
/ \ / \
r g ta e
/ \
t a
We say that "rgtae" is a scrambled string of "great".
Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1.
'''
class Solution:
# @return a boolean
def isScramble(self, s1, s2):
if len(s1) != len(s2):
return False
if s1 == s2:
return True
length = len(list(s1))
if sorted(s1) != sorted(s2):
return False
for i in xrange(1,length):
if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]):
return True
return False
# Note:
# Condition: 1) if len(s1) != len(s2), return False
# 2) if s1 == s2 (the two strings are exactly equal), return True
# 3) if sorted(s1) != sorted(s2), s2 cannot be a scramble of s1, return False
# 4) compare s1[:i] with s2[:i] and s1[i:] with s2[i:]
# 5) compare s1[:i] with s2[-i:] and s1[i:] with s2[:-i]
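# Example (added for illustration):
#   Solution().isScramble("great", "rgeat")  # True
#   Solution().isScramble("abcde", "caebd")  # False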
| mit | Python |
|
156d3c7035e4b7867d1e715c0bac98cf16d24d77 | add script to fix workspace info for search | briehl/narrative,kbase/narrative,pranjan77/narrative,briehl/narrative,briehl/narrative,briehl/narrative,pranjan77/narrative,kbase/narrative,kbase/narrative,briehl/narrative,pranjan77/narrative,pranjan77/narrative,pranjan77/narrative,kbase/narrative,briehl/narrative,kbase/narrative,pranjan77/narrative,briehl/narrative,pranjan77/narrative,kbase/narrative | src/scripts/fix_workspace_info.py | src/scripts/fix_workspace_info.py | """
Fixes workspace info to do the following.
1. Make sure the "narrative" metadata field contains an int that points to the narrative.
1. Make sure the "narrative_nice_name" metadata field is correct.
2. Make sure the "is_temporary" metadata field exists and is correct.
3. Adds a count of the number of narrative cells.
4. Does nothing at all if there's > 1 narrative in the workspace.
Note that while this fetches the Narrative object, it doesn't modify it in any way.
"""
from biokbase.workspace.client import Workspace
ws_url = "https://ci.kbase.us/services/workspace"
def fix_workspace_info(ws_id, token, verbose=False):
"""
ws_id = id of the workspace to update.
token = auth token of the user who owns the workspace.
"""
assert(token is not None)
assert(str(ws_id).isdigit())
new_meta = dict()
ws = Workspace(url=ws_url, token=token)
if verbose:
print("Checking workspace {}".format(ws_id))
# test if there's exactly 1 Narrative object in the workspace
narr_obj_list = ws.list_objects({'ids': [ws_id]})
if len(narr_obj_list) != 1:
if verbose:
print("\tFound {} Narratives! Skipping this workspace".format(len(narr_obj_list)))
return
narr_info = narr_obj_list[0]
narr_obj_id = narr_info[0]
# fetch the workspace info and narrative object (with metadata)
ws_info = ws.get_workspace_info({'id': int(ws_id)})
ws_meta = ws_info[8]
narr_obj = ws.get_objects2({'objects': [{'ref': '{}/{}'.format(ws_id, narr_obj_id)}]})['data'][0]
narr_name = narr_obj['data']['metadata']['name']
# 1. Test "narrative" key of ws_meta
if str(narr_obj_id) != ws_meta.get('narrative'):
new_meta['narrative'] = str(narr_obj_id)
if verbose:
print("\tUpdating id from {} -> {}".format(ws_meta.get('narrative'), narr_obj_id))
# 2. Test "is_temporary" key.
# Should be true if there's only a single narrative version, and it's name is Untitled, and it only has a single markdown cell.
# Should never reset to be temporary if it's not.
# Really, this is here to add the field if it's not there, and to set things as non-temporary
# if it looks like they should be.
# So, if the marked 'is_temporary' is already false, do nothing.
current_temp = ws_meta.get('is_temporary')
if current_temp == 'true':
# make sure it should be temporary.
if narr_info[4] > 1 or narr_name != 'Untitled':
if verbose:
print("\tNarrative is named {} and has {} versions - marking not temporary".format(narr_name, narr_info[4]))
new_meta['is_temporary'] = 'false'
# get list of cells
# if it's really REALLY old, it has a 'worksheets' field. Removed in Jupyter notebook format 4.
if 'worksheets' in narr_obj['data']:
cells = narr_obj['data']['worksheets'][0]['cells']
else:
cells = narr_obj['data']['cells']
if len(cells) > 1 or cells[0]['cell_type'] != 'markdown':
if verbose:
print("\tNarrative has {} cells and the first is type {} - marking not temporary".format(len(cells), cells[0]['cell_type']))
new_meta['is_temporary'] = 'false'
# 3. Test "narrative_nice_name" key
meta_name = ws_meta.get('narrative_nice_name')
if (meta_name is None and current_temp == 'false') or meta_name != narr_name:
new_meta['narrative_nice_name'] = narr_name
if verbose:
print("\tUpdating 'narrative_nice_name' from {} -> {}".format(meta_name, narr_name))
# 4. Add the total cell count while we're at it.
new_meta['cell_count'] = str(len(cells))
if verbose:
print("\tAdding cell_count of {}".format(str(len(cells))))
ws.alter_workspace_metadata({'wsi': {'id': ws_id}, 'new': new_meta}) | mit | Python |
|
f25b69a6ad6777576e31d0b01c4fc2c2bbe02788 | Create new.py | wulidexixilian/iotprototype,wulidexixilian/iotprototype | simple_mqtt/templates/new.py | simple_mqtt/templates/new.py | mit | Python |
||
0ee5d568ddc1f37abedb94f32d6b7da0439e6a4d | Create title_retriever.py | Souloist/Projects,Souloist/Projects,Souloist/Projects,Souloist/Projects,Souloist/Projects | solutions/title_retriever.py | solutions/title_retriever.py | '''
Script that will scrape the title of the given website
'''
import urllib
import re
def getstock(title):
regex = '<title>(.+?)</title>' #find all contents within title braces
pattern = re.compile(regex) #converts regex into a pattern that can be understood by re module
htmlfile = urllib.urlopen(title) #takes a string argument
htmltext = htmlfile.read()
titles = re.findall(pattern,htmltext)
return titles
while True:
try:
title = str(raw_input("Please give me a url: "))
if not "http" in title:
title = "http://"+title
break
except IOError:
print "Sorry that url is not valid. Please try another."
print getstock(title)[0]
| mit | Python |
|
071da9c0668d495e052baf5ad4d5bc9e068aa6a7 | Create dict2xml.py | Pilfer/dict2xml | dict2xml.py | dict2xml.py | # Python Dictionary to XML converter
# Written by github.com/Pilfer
# @CodesStuff
class dict2xml:
def __init__(self, debug = False):
self.debug = debug
if self.debug:
print "json2xml class has been loaded"
def genXML(self,xmldict):
tag = xmldict['tag']
attrs = []
kidstack = []
for attr in xmldict['attributes']:
attrs.append(str("%s=\"%s\"") % (attr['name'],attr['value']))
if xmldict['children'] != None:
for child in xmldict['children']:
tmp = self.genXML(child)
kidstack.append(tmp)
if(len(kidstack) == 0):
children = None
else:
children = "\n\t".join(kidstack)
else:
children = None
xmlout = str("<%s %s>%s</%s>") % (tag, ' '.join(attrs), children if children != None else '',tag)
return xmlout
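# Illustrative usage (the node shape is inferred from genXML above: each node
# carries 'tag', 'attributes' as a list of {'name', 'value'} dicts, and
# 'children' as a list of nodes or None):
#   converter = dict2xml()
#   node = {'tag': 'user', 'attributes': [{'name': 'id', 'value': '1'}], 'children': None}
#   print converter.genXML(node)  # -> <user id="1"></user>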
| apache-2.0 | Python |
|
320da5dcc192d654d09ea631e9684f26e97795c0 | add mitm script | tjcsl/hsf-2015-writeups,tjcsl/hsf-2015-writeups | reversing/400a-graphic/mitm.py | reversing/400a-graphic/mitm.py | vals = [0xdeadbeef,0xcafebabe,0xdeadbabe,0x8badf00d,0xb16b00b5,0xcafed00d,0xdeadc0de,0xdeadfa11,0xdefec8ed,0xdeadfeed,0xfee1dead,0xfaceb00b,0xfacefeed,0x000ff1ce,0x12345678,0x743029ab,0xdeed1234,0x00000000,0x11111111,0x11111112,0x11111113,0x42424242]
start = 0xdeadbeef
target = 0x764c648c
group1 = vals[:11]
group2 = vals[11:]
print(len(group1), len(group2))
def recur(begin, rest):
ret = []
if not rest:
return [begin]
for i in rest[0]:
ret += recur(begin + [i], rest[1:])
return ret
def all_possible(l):
l = list(zip([0x0] * len(l), l))
return recur([], l)
def xor_all(l, begin=0x0):
for i in l:
begin ^= i
return begin
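# Meet-in-the-middle: rather than scanning all 2**22 subsets of vals, build the
# 2**11 XOR-subset sums of each half (one table seeded with start, the other
# with target) and intersect the two tables to find a value reachable from
# both sides.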
group1_xors = {}
group2_xors = {}
for i in all_possible(group1):
group1_xors[xor_all(i, start)] = i
for i in all_possible(group2):
group2_xors[xor_all(i, target)] = i
intersect = set(group1_xors.keys()) & set(group2_xors.keys())
print(intersect)
sol = intersect.pop()
print(hex(sol))
valsol = group1_xors[sol] + group2_xors[sol]
valsol = [i for i in valsol if i != 0]
print(hex(xor_all(valsol, start)))
print(list(map(hex, valsol)))
| mit | Python |
|
d80f9ef4592cde488ece9f95b662f5e1e73eac42 | change database | wecontinue/book-collection,wecontinue/book-collection,wecontinue/book-collection,wecontinue/bookcase,wecontinue/bookcase,wecontinue/bookcase | lib/wunderlist.py | lib/wunderlist.py | #!/usr/bin/env python
from lib.base import BaseHandler
import tornado.locale
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from datetime import datetime
from tornado.options import define, options
import pymongo
if __name__ == "__main__":
define("port", default=8000, type=int, help="run on the given port")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/wunderlist/search",WunSearchHandler),
(r"/wunderlist/edit",WunEditHandler)
]
settings = dict(
debug=True
)
conn = pymongo.Connection("localhost", 27017)
self.db = conn["continuetry"]
tornado.web.Application.__init__(self, handlers, **settings)
class WunSearchHandler(BaseHandler):
def get(self):
qs = self.get_argument("qs", None)
if not qs:
no_qs = {
"errmsg": "no_qs",
"errcode": 1
}
self.write(no_qs)
return
coll = self.db["sbooks"]
coll_second = self.db["bbooks"]
#add two vote attribute
book_fields = ["isbn", "vote_count","voter","title",
"alt", "author",
"publisher", "image", "price",
"tags", "isdonated", "donor"]
book_fields_two = ["isbn","voter","title",
"alt", "author",
"publisher", "image", "price",
"tags", "isdonated", "donor"]
lst2 = []
lst3 = []
for key2 in coll.find({"isbn": int(qs)}):
lst2.append(key2)
if len(lst2) != 0:
for key in lst2:
del key["_id"]
self.write(key)
else:
for key3 in coll_second.find({"isbn":qs}):
lst3.append(key3)
if len(lst3) != 0:
for key in lst3:
del key["_id"]
self.write(key)
else:
not_exist = {
"errmsg":"not_exist",
"errcode":1
}
self.write(not_exist)
class WunEditHandler(BaseHandler):
def post(self):
isbn = self.get_argument("isbn",None)
if not isbn:
no_isbn = {
"errmsg":"no_isbn",
"errcode":1
}
self.write(no_isbn)
return
# NOTE: bind the target collection and the field list locally; both names
# were referenced here without being defined in this handler's scope.
coll_second = self.db["bbooks"]
book_fields_two = ["isbn", "voter", "title",
"alt", "author",
"publisher", "image", "price",
"tags", "isdonated", "donor"]
Wunbook = {}
lst = []
Wunbook["voter"] = lst
Wunbook["vote_count"] = 0
for key in book_fields_two:
if key == "voter":
Wunbook[key].append(self.get_argument(key,None))
else:
Wunbook[key] = self.get_argument(key,None)
Wunbook["created_at"] = datetime.now().__format__("%Y-%m-%d %H:%M:%S")
coll_second.insert(Wunbook)
# Save success
insert_sucs = {
"errcode": 0
}
self.write(insert_sucs)
if __name__ == "__main__":
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start() | mit | Python |
|
056bd290a4df08876109ef4e2da1115783a06f25 | Add examples for setting classes attribute | plumdog/flask_table,plumdog/flask_table,plumdog/flask_table | examples/classes.py | examples/classes.py | from flask_table import Table, Col
"""If we want to put an HTML class onto the table element, we can set
the "classes" attribute on the table class. This should be an iterable
of that are joined together and all added as classes. If none are set,
then no class is added to the table element.
"""
class Item(object):
def __init__(self, name, description):
self.name = name
self.description = description
class ItemTableOneClass(Table):
classes = ['class1']
name = Col('Name')
description = Col('Description')
class ItemTableTwoClasses(Table):
classes = ['class1', 'class2']
name = Col('Name')
description = Col('Description')
def one_class(items):
table = ItemTableOneClass(items)
# or {{ table }} in jinja
print(table.__html__())
"""Outputs:
<table class="class1">
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>Name1</td>
<td>Description1</td>
</tr>
</tbody>
</table>
"""
def two_classes(items):
table = ItemTableTwoClasses(items)
# or {{ table }} in jinja
print(table.__html__())
"""Outputs:
<table class="class1 class2">
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>Name1</td>
<td>Description1</td>
</tr>
</tbody>
</table>
"""
def main():
items = [Item('Name1', 'Description1')]
# user ItemTableOneClass
one_class(items)
print('\n######################\n')
# user ItemTableTwoClasses
two_classes(items)
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
|
f16187d5943158d82fc87611f998283789b5ecdf | Add libarchive 3.1.2 | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/libarchive.py | packages/libarchive.py | Package ('libarchive', '3.1.2', sources = ['http://libarchive.org/downloads/%{name}-%{version}.tar.gz'],
configure_flags = [
'--enable-bsdtar=shared',
'--enable-bsdcpio=shared',
'--disable-silent-rules',
'--without-nettle']
)
| mit | Python |
|
b9b2b87f0d630de931765c1c9f448e295440e611 | Create fetch_qt_version.py | keithel/unorganized-scripts,keithel/unorganized-scripts,keithel/unorganized-scripts | fetch_qt_version.py | fetch_qt_version.py | """Module to return the Qt version of a Qt codebase.
This module provides a function that returns the version of a Qt codebase, given
the toplevel qt5 repository directory. Note, the `qt5` directory applies to both
Qt 5.x and Qt 6.x
If it is run standalone with a python interpreter and not as part of another
Python module, it must be run from the toplevel directory of a qt5 repository
with the qtbase git submodule cloned and checked out.
"""
from __future__ import print_function # For python2 portability
import os
import sys
import re
def qt_version(qt5_dir: str) -> str:
"""Returns the Qt version of a Qt codebase"""
if not os.path.exists(qt5_dir + "/qtbase"):
print("qtbase doesn't exist. Please pass the path to a qt5 repo. aborting.", file=sys.stderr)
return None
changesFiles = os.listdir(qt5_dir + "/qtbase/dist")
# Every version released has a 'changes-<version #>' file describing what
# changed - we will use that to figure out the closest version number to
# this checked out code.
# Only include versions that have version numbers that conform to standard
# version numbering rules (major.minor.release)
regex = r"^changes-([0-9.]*)"
src = re.search
versions = [m.group(1) for changesFile in changesFiles for m in [src(regex, changesFile)] if m]
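# e.g. a dist file named 'changes-5.12.0' contributes the version string '5.12.0'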
# Fetch version from qtbase/.cmake.conf
cmake_conf_path = qt5_dir + "/qtbase/.cmake.conf"
if os.path.exists(cmake_conf_path):
# Qt6 uses CMake, and we can determine version from .cmake.conf
cmake_conf_file = open(cmake_conf_path, 'r')
qt6_version = ""
for line in cmake_conf_file:
if "QT_REPO_MODULE_VERSION" in line:
qt6_version = line.split('"')[1]
break
if qt6_version:
versions.append(qt6_version)
versions.sort(key=lambda s: list(map(int, s.split('.'))))
return versions[-1]
if __name__ == "__main__":
if not os.path.exists("qtbase"):
print("qtbase doesn't exist. Please run from base of qt5 repo. aborting.", file=sys.stderr)
sys.exit(1)
print(qt_version("."))
| apache-2.0 | Python |
|
f9b38f675df9752a4b5309df059c6d15a1e1b3c2 | Add module for range support. | SublimeText/VintageEx | ex_range.py | ex_range.py | from collections import namedtuple
from vintage_ex import EX_RANGE_REGEXP
import location
EX_RANGE = namedtuple('ex_range', 'left left_offset separator right right_offset')
def get_range_parts(range):
parts = EX_RANGE_REGEXP.search(range).groups()
return EX_RANGE(
left=parts[1],
left_offset=parts[3] or '0',
separator=parts[5],
right=parts[7],
right_offset=parts[9] or '0'
)
def calculate_range(view, range):
parsed_range = get_range_parts(range)
if parsed_range.left == '%':
left, left_offset = '1', '0'
right, right_offset = '$', '0'
elif parsed_range.separator:
left, left_offset = parsed_range.left, parsed_range.left_offset
right, right_offset = parsed_range.right, parsed_range.right_offset
return calculate_range_part(view, left) + int(left_offset), \
calculate_range_part(view, right) + int(right_offset)
def calculate_range_part(view, p):
if p.isdigit():
return int(p)
if p.startswith('/') or p.startswith('?'):
if p.startswith('?'):
return location.reverse_search(view, p[1:-1],
end=view.sel()[0].begin())
return location.search(view, p[1:-1])
if p in ('$', '.'):
return location.calculate_relative_ref(view, p)
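# Illustrative examples (assuming EX_RANGE_REGEXP captures the parts and
# offsets as used above): calculate_range(view, '%') spans the whole buffer,
# while '.,$' resolves the current line and the last line, with any numeric
# offsets added afterwards.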
| mit | Python |
|
15cf6b5d35e2fbaf39d419ddbe5da1b16247ccaa | add test_parse_table_options.py | ickc/pantable | tests/test_parse_table_options.py | tests/test_parse_table_options.py | #!/usr/bin/env python3
"""
`header` and `markdown` are checked by `test_to_bool` instead
"""
from .context import pandoc_tables
import panflute
def test_parse_table_options():
options = {
'caption': None,
'alignment': None,
'width': None,
'table-width': 1.0,
'header': True,
'markdown': True,
'include': None
}
raw_table_list = [['1', '2', '3', '4'], ['5', '6', '7', '8']]
# check init is preserved
assert pandoc_tables.parse_table_options(
options, raw_table_list) == options
# check caption
options['caption'] = '**sad**'
assert str(pandoc_tables.parse_table_options(
options, raw_table_list
)['caption'][0]) == 'Strong(Str(sad))'
# check alignment
options['alignment'] = 'LRCD'
assert pandoc_tables.parse_table_options(
options, raw_table_list
)['alignment'] == [
'AlignLeft',
'AlignRight',
'AlignCenter',
'AlignDefault'
]
options['alignment'] = 'LRC'
assert pandoc_tables.parse_table_options(
options, raw_table_list
)['alignment'] == [
'AlignLeft',
'AlignRight',
'AlignCenter',
'AlignDefault'
]
# check width
options['width'] = [0.1, 0.2, 0.3, 0.4]
assert pandoc_tables.parse_table_options(
options, raw_table_list
)['width'] == [0.1, 0.2, 0.3, 0.4]
# auto-width
raw_table_list = [
['asdfdfdfguhfdhghfdgkla', '334\n2', '**la**', '4'],
['5', '6', '7', '8']
]
options['width'] = None
options['table-width'] = 1.2
assert pandoc_tables.parse_table_options(
options, raw_table_list
)['width'] == [22 / 32 * 1.2, 3 / 32 * 1.2, 6 / 32 * 1.2, 1 / 32 * 1.2]
return
| bsd-3-clause | Python |
|
71dd485685a481f21e03af6db5a4bc1f91a64ce9 | Add service settings migration | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/structure/migrations/0018_service_settings_plural_form.py | nodeconductor/structure/migrations/0018_service_settings_plural_form.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('structure', '0017_add_azure_service_type'),
]
operations = [
migrations.AlterModelOptions(
name='servicesettings',
options={'verbose_name': 'Service settings', 'verbose_name_plural': 'Service settings'},
),
]
| mit | Python |
|
93a2caab2963423e40714ada59abcfeab5c57aea | Add NetBox pillar | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/pillar/netbox.py | salt/pillar/netbox.py | # -*- coding: utf-8 -*-
'''
A module that adds data to the Pillar structure from a NetBox API.
Configuring the NetBox ext_pillar
====================================
.. code-block:: yaml
ext_pillar:
- netbox:
api_url: http://netbox_url.com/api/
The following are optional, and determine whether or not the module will
attempt to configure the ``proxy`` pillar data for use with the napalm
proxy-minion:
.. code-block:: yaml
proxy_return: True
proxy_username: admin
api_token: 123abc
Create a token in your NetBox instance at
http://netbox_url.com/user/api-tokens/
By default, this module will query the NetBox API for the platform associated
with the device, and use the 'NAPALM driver' field to set the napalm
proxy-minion driver. (Currently only 'napalm' is supported for drivertype.)
This module assumes you will use SSH keys to authenticate to the network device
If password authentication is desired, it is recommended to create another
``proxy`` key in pillar_roots (or git_pillar) with just the ``passwd`` key and
use :py:func:`salt.renderers.gpg <salt.renderers.gpg>` to encrypt the value.
If any additional options for the proxy setup are needed they should also be
configured in pillar_roots.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
try:
import requests
import ipaddress
_HAS_DEPENDENCIES = True
except ImportError:
_HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
def __virtual__():
return _HAS_DEPENDENCIES
def ext_pillar(minion_id, pillar, *args, **kwargs):
'''
Query NetBox API for minion data
'''
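# kwargs carries the options from the master config shown in the module
# docstring, e.g. api_url, api_token, proxy_return and proxy_username.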
# Pull settings from kwargs
api_url = kwargs['api_url'].rstrip('/')
api_token = kwargs.get('api_token', None)
proxy_username = kwargs.get('proxy_username', None)
proxy_return = kwargs.get('proxy_return', True)
ret = {}
headers = {}
if api_token:
headers['Authorization'] = 'Token ' + api_token
# Fetch device from API
device_results = requests.get(
api_url + '/dcim/devices/',
params={'name': minion_id, },
headers=headers,
)
# Check status code for API call
if device_results.status_code != requests.codes.ok:
log.warn('API query failed for "%s", status code: %d',
minion_id, device_results.status_code)
# Assign results from API call to "netbox" key
try:
devices = device_results.json()['results']
if len(devices) == 1:
ret['netbox'] = devices[0]
elif len(devices) > 1:
log.error('More than one device found for "%s"', minion_id)
except Exception:
log.error('Device not found for "%s"', minion_id)
if proxy_return:
# Attempt to add "proxy" key, based on platform API call
try:
# Fetch device from API
platform_results = requests.get(
ret['netbox']['platform']['url'],
headers=headers,
)
# Check status code for API call
if platform_results.status_code != requests.codes.ok:
log.info('API query failed for "%s", status code: %d',
minion_id, platform_results.status_code)
# Assign results from API call to "proxy" key if the platform has a
# napalm_driver defined.
napalm_driver = platform_results.json().get('napalm_driver')
if napalm_driver:
ret['proxy'] = {
'host': str(ipaddress.IPv4Interface(
ret['netbox']['primary_ip4']['address']).ip),
'driver': napalm_driver,
'proxytype': 'napalm',
}
if proxy_username:
ret['proxy']['username'] = proxy_username
except Exception:
log.debug(
'Could not create proxy config data for "%s"', minion_id)
return ret
| apache-2.0 | Python |
|
ca8d7773a2d1a5ce4195ce693ccd66bbf53af394 | Read in proteinGroupts.txt from MS data | dmyersturnbull/pynd-pubs-ms | proteinGroupsParser.py | proteinGroupsParser.py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 08:46:25 2015
@author: student
"""
import pandas as pd
#import numpy as np
# read in file
#peptideNames = """'Protein IDsโ, 'Majority protein IDsโ, 'Peptide counts (all)โ, 'Peptide counts (razor+unique)โ, 'Peptide counts (unique)โ, 'Fasta headersโ, 'Number of proteinsโ, 'Peptidesโ,'Razor + unique peptidesโ, 'Unique peptidesโ,'Peptides Control_Ubโ, 'Peptides Control_UbPโ,'Peptides Control_WCLโ, 'Peptides Control_WCLPโ,'Peptides Pynd_5FC_Ubโ, 'Peptides Pynd_5FC_UbPโ,'Peptides Pynd_5FC_WCLโ, 'Peptides Pynd_5FC_WCLPโ,'Peptides Pynd_AlkKO_Ubโ, 'Peptides Pynd_AlkKO_UbPโ,'Peptides Pynd_AlkKO_WCLโ, 'Peptides Pynd_AlkKO_WCLPโ"""
#colIndices = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 20, 21, 38, 39, 40, 41, 42, 43, 44, 45, 68, 69, 70, 71, 88, 89, 90, 91, 92, 93, 94, 95, 118, 119, 120, 121, 138, 139, 140, 141, 142, 143, 144, 145, 160, 161, 162, 163, 164, 165, 166, 167, 176, 177, 178, 179, 196, 197, 198, 199, 200, 201, 202, 203, 226, 227, 228, 229, 246, 247, 248, 249, 250, 251, 252, 253, 268, 277, 278, 279, 280, 297, 298, 299, 300, 301, 302, 303, 304, 327, 328, 329, 330, 347, 348, 349, 350, 351, 352, 353, 354, 377, 378, 379, 380, 397, 398, 399, 400, 401, 402, 403, 404, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433
data = '/Users/student/Downloads/PUBS 2015 MS files/proteinGroups.txt'
df = pd.read_table(data, usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 20, 21, 38, 39, 40, 41, 42, 43, 44, 45, 68, 69, 70, 71, 88, 89, 90, 91, 92, 93, 94, 95, 118, 119, 120, 121, 138, 139, 140, 141, 142, 143, 144, 145, 160, 161, 162, 163, 164, 165, 166, 167, 176, 177, 178, 179, 196, 197, 198, 199, 200, 201, 202, 203, 226, 227, 228, 229, 246, 247, 248, 249, 250, 251, 252, 253, 268, 277, 278, 279, 280, 297, 298, 299, 300, 301, 302, 303, 304, 327, 328, 329, 330, 347, 348, 349, 350, 351, 352, 353, 354, 377, 378, 379, 380, 397, 398, 399, 400, 401, 402, 403, 404, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433])
# Junk
#print df.dtypes
#print df['Intensity']
#for i in df.index:
# for j in df.columns:
# print df.index[i], df.columns[j]
# print df.index.values
#print df.columns.values
#print df.keys
# print '%s, %s' % (df.index.values, df[i].columns.values)
| apache-2.0 | Python |
|
3509585cd14bb51fb00b60df1dcb295bc561d679 | Add _version.py file | desihub/desidatamodel,desihub/desidatamodel | py/desidatamodel/_version.py | py/desidatamodel/_version.py | __version__ = '0.2.0.dev71'
| bsd-3-clause | Python |
|
b3383e6c428eccdd67ddc4cfa90e6d22da35412a | Add lib/sccache.py helper script | atom/libchromiumcontent,electron/libchromiumcontent,atom/libchromiumcontent,electron/libchromiumcontent | script/lib/sccache.py | script/lib/sccache.py | import os
import sys
from config import TOOLS_DIR
VERSION = '0.2.6'
SUPPORTED_PLATFORMS = {
'cygwin': 'windows',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'windows',
}
def is_platform_supported(platform):
return platform in SUPPORTED_PLATFORMS
def get_binary_path():
platform = sys.platform
if not is_platform_supported(platform):
return None
platform_dir = SUPPORTED_PLATFORMS[platform]
path = os.path.join(TOOLS_DIR, 'sccache', VERSION, platform_dir, 'sccache')
if platform_dir == 'windows':
path += '.exe'
return path
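# Illustrative usage:
#   binary = get_binary_path()
#   if binary is None:
#       raise RuntimeError('sccache is not available for this platform')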
| mit | Python |
|
b7459feac37753928fcfc1fe25a0f40d21d89ecf | add collections07.py | devlights/try-python | trypython/stdlib/collections07.py | trypython/stdlib/collections07.py | # coding: utf-8
"""
Sample code for collections.namedtuple.
For the basics of how namedtuple is used, see collections04.py.
"""
import collections as cols
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
MyVal01 = cols.namedtuple('MyVal01', ['name', 'value'])
obj1 = MyVal01('hello world', 'value01')
pr('obj', obj1)
# namedtuple does not have a __dict__
try:
pr('__dict__', obj1.__dict__)
except AttributeError as e:
pr('__dict__', e)
# namedtuple sets __slots__ to an empty tuple
pr('__slots__', obj1.__slots__)
# ------------------------------------------------------------
# A namedtuple can be used just like a regular tuple.
# In addition, it has the following three methods:
# - _make
# - _asdict
# - _replace
# ------------------------------------------------------------
# ------------------------------------------------------------
# The _make method
# --------------------
# Builds a new object from an existing sequence or iterable.
# Handy for creating objects from rows of a CSV file or a database.
# ------------------------------------------------------------
rows = (['hello', 'value01'], ['world', 'value02'])
for item in (MyVal01._make(row) for row in rows):
pr('item', item)
# ------------------------------------------------------------
# The _asdict method
# --------------------
# Returns an OrderedDict of field names and values.
# Since the return value is an OrderedDict, items come back in field order.
# (*) The return type has been OrderedDict since Python 3.1.
# ------------------------------------------------------------
obj_dict = obj1._asdict()
pr('obj_dict', obj_dict)
# To build a namedtuple from a dict, pass the dict in **kwargs form
obj2 = MyVal01(**obj_dict)
pr('obj2', obj2)
pr('eq', obj1 == obj2)
# ------------------------------------------------------------
# The _replace method
# --------------------
# Returns a new namedtuple with the specified field values replaced.
# Since namedtuples are immutable, a new object is always returned.
# ------------------------------------------------------------
obj3 = obj2._replace(name='world hello', value='value03')
pr('obj3', obj3)
pr('eq', obj3 == obj2)
# ------------------------------------------------------------
# To give a namedtuple custom methods, define a new class
# that uses the namedtuple as its parent class.
# ------------------------------------------------------------
class MyVal02(cols.namedtuple('MyVal02', ['name'])):
__slots__ = ()
@property
def upper_name(self):
return self.name.upper()
obj4 = MyVal02('hello world 2')
pr('obj4.name', obj4.name)
pr('obj4.upper_name', obj4.upper_name)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
| mit | Python |
|
8302536cafa07a078cfb6629b5e9cc85e1798e1e | Add Appalachian Regional Commission. | divergentdave/inspectors-general,lukerosiak/inspectors-general | inspectors/arc.py | inspectors/arc.py | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://www.arc.gov/oig
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://www.arc.gov/about/OfficeofInspectorGeneralAuditandInspectionReports.asp"
SEMIANNUAL_REPORTS_URL = "http://www.arc.gov/about/OfficeofinspectorGeneralSemiannualReports.asp"
def run(options):
year_range = inspector.year_range(options)
# Pull the audit reports
for url in [AUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL]:
doc = BeautifulSoup(utils.download(url))
results = doc.select("table p > a")
for result in results:
report = report_from(result, url, year_range)
if report:
inspector.save_report(report)
def report_from(result, landing_url, year_range):
report_url = urljoin(landing_url, result.get('href'))
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
try:
title = result.parent.find("em").text
except AttributeError:
title = result.parent.contents[0]
estimated_date = False
try:
published_on_text = title.split("–")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
except ValueError:
# For reports where we can only find the year, set them to Nov 1st of that year
published_on_year = int(result.find_previous("strong").text.replace("Fiscal Year ", ""))
published_on = datetime.datetime(published_on_year, 11, 1)
estimated_date = True
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'arc',
'inspector_url': 'http://www.arc.gov/oig',
'agency': 'arc',
'agency_name': 'Appalachian Regional Commission',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if estimated_date:
report['estimated_date'] = estimated_date
return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python |
|
d834216bcc93eac7b324d95498d9580e3f769dfa | Add Government Printing Office. | divergentdave/inspectors-general,lukerosiak/inspectors-general | inspectors/gpo.py | inspectors/gpo.py | #!/usr/bin/env python
import datetime
import logging
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://www.gpo.gov/oig/
# Oldest report: 2004
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://www.gpo.gov/oig/audits.htm"
SEMIANNUAL_REPORTS_URL = "http://www.gpo.gov/oig/semi-anual.htm"
HEADER_TITLES = [
'Report #',
'Date',
]
def run(options):
year_range = inspector.year_range(options)
# Pull the reports
for url in [AUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL]:
doc = BeautifulSoup(utils.download(url))
results = doc.select("div.section1 div.ltext > table tr")
if not results:
results = doc.select("td.three-col-layout-middle div.ltext > table tr")
if not results:
raise AssertionError("No report links found for %s" % url)
for result in results:
if (not result.text.strip() or
result.find("th") or
result.find("strong") or
result.contents[1].text in HEADER_TITLES
):
# Skip header rows
continue
report = report_from(result, url, year_range)
if report:
inspector.save_report(report)
def report_from(result, landing_url, year_range):
title = result.select("td")[-1].text
if "contains sensitive information" in title:
unreleased = True
report_url = None
report_id = "-".join(title.split())[:50]
else:
unreleased = False
link = result.find("a")
report_id = link.text
report_url = urljoin(landing_url, link.get('href'))
estimated_date = False
try:
published_on = datetime.datetime.strptime(report_id.strip(), '%m.%d.%y')
except ValueError:
published_on_year_text = result.find_previous("th").text
published_on_year = int(published_on_year_text.replace("Fiscal Year ", ""))
published_on = datetime.datetime(published_on_year, 11, 1)
estimated_date = True
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'gpo',
'inspector_url': 'http://www.gpo.gov/oig/',
'agency': 'gpo',
'agency_name': 'Government Printing Office',
'file_type': 'pdf',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if estimated_date:
report['estimated_date'] = estimated_date
if unreleased:
report['unreleased'] = unreleased
report['landing_url'] = landing_url
return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python |
|
d3c9a6bdc1b8cfb56f9ad408f5257b9ac518b2ac | Add preprocessor | asi1024/ContestLibrary,asi1024/competitive-library,asi1024/competitive-library,asi1024/competitive-library | scripts/preprocess.py | scripts/preprocess.py | #!/usr/bin/env python
import argparse
import os
def preprocess(path):
includes = set()
res = []
def preprocess_line(path, line):
if line.strip().startswith('#'):
line = line.strip()
if line.startswith('#include') and len(line.split('"')) >= 3:
lx = line.split('"')
relpath = ''.join(lx[1:len(lx) - 1])
target_path = os.path.dirname(path) + '/' + relpath
if target_path.startswith('/'):
target_path = target_path[1:]
preprocess_path(os.path.normpath(target_path))
return '\n'
elif line.startswith('#pragma'):
if ''.join(line.split(' ')[1:]).strip() == 'once':
return ''
return line
def preprocess_path(path):
if path not in includes:
has_not_started = True
includes.add(path)
for line in open(path):
s = preprocess_line(path, line)
if has_not_started and s.strip() != "":
prefix = '//===== {} =====\n\n'.format(os.path.basename(path))
res.append(prefix)
has_not_started = False
res.append(s.rstrip())
preprocess_path(path)
print('\n'.join(res))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filepath', nargs=1, help='cpp file')
args = parser.parse_args()
filepath = args.filepath[0]
preprocess(filepath)
| mit | Python |
|
e285c097be60f9db5fae075f21b7450f403640d2 | add scaffold for an AvailabilityAssessment class | Moliholy/cvmfs,MicBrain/cvmfs,cvmfs/cvmfs,DrDaveD/cvmfs,cvmfs-testing/cvmfs,trshaffer/cvmfs,DrDaveD/cvmfs,cvmfs/cvmfs,Gangbiao/cvmfs,Gangbiao/cvmfs,cvmfs-testing/cvmfs,cvmfs/cvmfs,cvmfs/cvmfs,MicBrain/cvmfs,Moliholy/cvmfs,cvmfs-testing/cvmfs,Gangbiao/cvmfs,cvmfs-testing/cvmfs,DrDaveD/cvmfs,alhowaidi/cvmfsNDN,djw8605/cvmfs,Moliholy/cvmfs,reneme/cvmfs,djw8605/cvmfs,trshaffer/cvmfs,Gangbiao/cvmfs,djw8605/cvmfs,reneme/cvmfs,alhowaidi/cvmfsNDN,cvmfs/cvmfs,alhowaidi/cvmfsNDN,djw8605/cvmfs,reneme/cvmfs,DrDaveD/cvmfs,Moliholy/cvmfs,Gangbiao/cvmfs,cvmfs-testing/cvmfs,trshaffer/cvmfs,Moliholy/cvmfs,DrDaveD/cvmfs,MicBrain/cvmfs,trshaffer/cvmfs,alhowaidi/cvmfsNDN,alhowaidi/cvmfsNDN,DrDaveD/cvmfs,cvmfs/cvmfs,reneme/cvmfs,trshaffer/cvmfs,cvmfs/cvmfs,djw8605/cvmfs,reneme/cvmfs,MicBrain/cvmfs,MicBrain/cvmfs,DrDaveD/cvmfs | python/cvmfs/availability.py | python/cvmfs/availability.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
import cvmfs
class WrongRepositoryType(Exception):
def __init__(self, repo, expected_type):
assert repo.type != expected_type
self.repo = repo
self.expected_type = expected_type
def __str__(self):
return self.repo.fqrn + " is of type '" + self.repo.type + "' but '" + self.expected_type + "' was expected"
class AvailabilityAssessment:
def _check_repo_type(self, repo, expected_type):
if repo.has_repository_type() and repo.type != expected_type:
raise WrongRepositoryType(repo, expected_type)
return True;
def __init__(self, stratum0_repository, stratum1_repositories = []):
self._check_repo_type(stratum0_repository, 'stratum0')
for stratum1 in stratum1_repositories:
self._check_repo_type(stratum1, 'stratum1')
self.stratum0 = stratum0_repository
self.stratum1s = stratum1_repositories
def assess(self):
pass
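# Hypothetical usage once assess() is implemented:
#   assessment = AvailabilityAssessment(stratum0_repo, [stratum1_repo_a, stratum1_repo_b])
#   assessment.assess()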
| bsd-3-clause | Python |
|
0e9da5d0099b9c7b527250d6bf8051242e77103a | Add script for showing the results | ssh0/growing-string,ssh0/growing-string | triangular_lattice/distances_analyze.py | triangular_lattice/distances_analyze.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-10-12
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
if __name__ == '__main__':
# result_data_path = "./results/data/distances/beta=0.00_161012_171430.npz"
# result_data_path = "./results/data/distances/beta=5.00_161012_171649.npz"
# result_data_path = "./results/data/distances/beta=10.00_161012_172119.npz"
# result_data_path = "./results/data/distances/beta=15.00_161012_172209.npz"
# result_data_path = "./results/data/distances/beta=20.00_161012_172338.npz"
data = np.load(result_data_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
L = data['L']
frames = data['frames']
distance_list = data['distance_list']
path_length = data['path_length']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
hist, xedges, yedges = np.histogram2d(distance_list, path_length, bins=100)
xpos, ypos = np.meshgrid(xedges[:-1] + (xedges[1] - xedges[0]) / 2.,
yedges[:-1] + (yedges[1] - yedges[0]) / 2.)
zpos = hist.T
ax.plot_wireframe(xpos, ypos, zpos, rstride=1)
ax.plot(xpos[0], xpos[0], lw=2)
ax.set_aspect('equal')
ax.set_xlim(xedges[0], xedges[-1])
ax.set_ylim(yedges[0], yedges[-1])
ax.set_xlabel('Distance')
ax.set_ylabel('Path length')
ax.set_title('Path length and distances between two points in the cluster'
+ r'($\beta = %2.2f$)' % beta)
plt.show()
| mit | Python |
|
29e170f9f92f8327c71a9dfc2b9fb9e18947db72 | create predictions on pre-trained models | Nickil21/Indeed-ML-codesprint-2017 | source/generate_predictions.py | source/generate_predictions.py | import numpy as np
import pandas as pd
from sklearn.externals import joblib
from data_preprocessing import join_strings
from model import mlb, count_vectorizer_test_x, tfidf_vectorizer_test_x, file_cnt, file_tfidf
count_vectorizer_model, tfidf_vectorizer_model = joblib.load(file_cnt), joblib.load(file_tfidf)
print("Both the trained models have been imported successfully!")
print()
print("Making predictions...")
pred1 = count_vectorizer_model.predict(count_vectorizer_test_x.toarray())
pred2 = tfidf_vectorizer_model.predict(tfidf_vectorizer_test_x.toarray())
# Combine predictions and map the labels if the values do not equal 0, else assign empty string
arr = np.where((pred1 + pred2) != 0, mlb.classes_, "")
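# A label is kept when either model predicted it, i.e. the element-wise
# logical OR of the two binary prediction matrices.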
# Load the array into a DataFrame constructor and join non-empty strings
predictions = pd.DataFrame(arr).apply(join_strings, axis=1).to_frame("tags")
# Submit predictions
print("Submitting predictions...")
predictions.to_csv("tags.tsv", index=False)
print("done")
| mit | Python |
|
c6358b282ea28dd113c9053dab0fe2fa66f4d59d | Allow metrics to start with a braces expression | zBMNForks/graphite-web,synedge/graphite-web,zBMNForks/graphite-web,cbowman0/graphite-web,synedge/graphite-web,obfuscurity/graphite-web,penpen/graphite-web,zuazo-forks/graphite-web,pu239ppy/graphite-web,Krylon360/vimeo-graphite-web,goir/graphite-web,slackhappy/graphite-web,cybem/graphite-web-iow,evernote/graphite-web,atnak/graphite-web,pu239ppy/graphite-web,bbc/graphite-web,piotr1212/graphite-web,evernote/graphite-web,mcoolive/graphite-web,lfckop/graphite-web,lyft/graphite-web,deniszh/graphite-web,krux/graphite-web,pcn/graphite-web,cbowman0/graphite-web,graphite-project/graphite-web,ceph/graphite-web,JeanFred/graphite-web,goir/graphite-web,lfckop/graphite-web,pu239ppy/graphite-web,AICIDNN/graphite-web,dbn/graphite-web,edwardmlyte/graphite-web,criteo-forks/graphite-web,slackhappy/graphite-web,markolson/graphite-web,Krylon360/evernote-graphite-web,mleinart/graphite-web,phreakocious/graphite-web,Aloomaio/graphite-web,cgvarela/graphite-web,cloudant/graphite-web,synedge/graphite-web,jssjr/graphite-web,EinsamHauer/graphite-web-iow,esnet/graphite-web,bpaquet/graphite-web,jssjr/graphite-web,bbc/graphite-web,Squarespace/graphite-web,Squarespace/graphite-web,g76r/graphite-web,gwaldo/graphite-web,criteo-forks/graphite-web,mleinart/graphite-web,mleinart/graphite-web,g76r/graphite-web,bmhatfield/graphite-web,krux/graphite-web,redice/graphite-web,zuazo-forks/graphite-web,section-io/graphite-web,Talkdesk/graphite-web,cloudant/graphite-web,ceph/graphite-web,pcn/graphite-web,JeanFred/graphite-web,graphite-project/graphite-web,bpaquet/graphite-web,piotr1212/graphite-web,bmhatfield/graphite-web,krux/graphite-web,edwardmlyte/graphite-web,mcoolive/graphite-web,cybem/graphite-web-iow,0x20h/graphite-web,evernote/graphite-web,drax68/graphite-web,blacked/graphite-web,bruce-lyft/graphite-web,zBMNForks/graphite-web,zBMNForks/graphite-web,bpaquet/graphite-web,kkdk5535/graphite-web,esnet/graphite-web,bpaquet/graphite-web,criteo-forks/graphite-web,graphite-server/graphite-web,Talkdesk/graphite-web,Invoca/graphite-web,axibase/graphite-web,DanCech/graphite-web,Krylon360/evernote-graphite-web,nkhuyu/graphite-web,nkhuyu/graphite-web,redice/graphite-web,dhtech/graphite-web,MjAbuz/graphite-web,kkdk5535/graphite-web,disqus/graphite-web,johnseekins/graphite-web,Squarespace/graphite-web,esnet/graphite-web,0x20h/graphite-web,atnak/graphite-web,disqus/graphite-web,lyft/graphite-web,pcn/graphite-web,nkhuyu/graphite-web,obfuscurity/graphite-web,obfuscurity/graphite-web,mcoolive/graphite-web,atnak/graphite-web,cbowman0/graphite-web,AICIDNN/graphite-web,jssjr/graphite-web,cloudant/graphite-web,cgvarela/graphite-web,AICIDNN/graphite-web,bruce-lyft/graphite-web,lyft/graphite-web,Krylon360/vimeo-graphite-web,disqus/graphite-web,MjAbuz/graphite-web,afilipovich/graphite-web,0x20h/graphite-web,dbn/graphite-web,DanCech/graphite-web,SEJeff/graphite-web,Talkdesk/graphite-web,drax68/graphite-web,disqus/graphite-web,ceph/graphite-web,atnak/graphite-web,Invoca/graphite-web,obfuscurity/graphite-web,SEJeff/graphite-web,zBMNForks/graphite-web,graphite-project/graphite-web,piotr1212/graphite-web,Krylon360/evernote-graphite-web,graphite-project/graphite-web,Krylon360/vimeo-graphite-web,g76r/graphite-web,MjAbuz/graphite-web,blacked/graphite-web,Invoca/graphite-web,johnseekins/graphite-web,Krylon360/evernote-graphite-web,evernote/graphite-web,Invoca/graphite-web,kkdk5535/graphite-web,piotr1212/graphite-web,dhtech/graphite-web,Skyscanner/graphite-web,section-io/gr
aphite-web,JeanFred/graphite-web,Skyscanner/graphite-web,ZelunZhang/graphite-web,obfuscurity/graphite-web,JeanFred/graphite-web,SEJeff/graphite-web,bbc/graphite-web,axibase/graphite-web,krux/graphite-web,g76r/graphite-web,bmhatfield/graphite-web,section-io/graphite-web,ceph/graphite-web,Invoca/graphite-web,MjAbuz/graphite-web,graphite-server/graphite-web,gwaldo/graphite-web,SEJeff/graphite-web,brutasse/graphite-web,Aloomaio/graphite-web,section-io/graphite-web,ZelunZhang/graphite-web,edwardmlyte/graphite-web,disqus/graphite-web,slackhappy/graphite-web,phreakocious/graphite-web,graphite-server/graphite-web,jssjr/graphite-web,bbc/graphite-web,markolson/graphite-web,g76r/graphite-web,ZelunZhang/graphite-web,penpen/graphite-web,synedge/graphite-web,SEJeff/graphite-web,phreakocious/graphite-web,Talkdesk/graphite-web,Krylon360/vimeo-graphite-web,johnseekins/graphite-web,ceph/graphite-web,jssjr/graphite-web,pu239ppy/graphite-web,bpaquet/graphite-web,cosm0s/graphite-web,piotr1212/graphite-web,bruce-lyft/graphite-web,redice/graphite-web,Invoca/graphite-web,gwaldo/graphite-web,EinsamHauer/graphite-web-iow,pu239ppy/graphite-web,brutasse/graphite-web,Aloomaio/graphite-web,cybem/graphite-web-iow,synedge/graphite-web,esnet/graphite-web,lyft/graphite-web,axibase/graphite-web,cgvarela/graphite-web,cybem/graphite-web-iow,zuazo-forks/graphite-web,dhtech/graphite-web,Squarespace/graphite-web,zuazo-forks/graphite-web,bruce-lyft/graphite-web,0x20h/graphite-web,EinsamHauer/graphite-web-iow,slackhappy/graphite-web,graphite-server/graphite-web,Krylon360/evernote-graphite-web,penpen/graphite-web,atnak/graphite-web,section-io/graphite-web,afilipovich/graphite-web,DanCech/graphite-web,johnseekins/graphite-web,ZelunZhang/graphite-web,bruce-lyft/graphite-web,johnseekins/graphite-web,DanCech/graphite-web,AICIDNN/graphite-web,afilipovich/graphite-web,Squarespace/graphite-web,lyft/graphite-web,Krylon360/vimeo-graphite-web,mcoolive/graphite-web,AICIDNN/graphite-web,MjAbuz/graphite-web,bruce-lyft/graphite-web,cybem/graphite-web-iow,edwardmlyte/graphite-web,edwardmlyte/graphite-web,DanCech/graphite-web,deniszh/graphite-web,deniszh/graphite-web,drax68/graphite-web,graphite-server/graphite-web,Squarespace/graphite-web,bpaquet/graphite-web,brutasse/graphite-web,kkdk5535/graphite-web,0x20h/graphite-web,blacked/graphite-web,jssjr/graphite-web,zuazo-forks/graphite-web,EinsamHauer/graphite-web-iow,johnseekins/graphite-web,criteo-forks/graphite-web,penpen/graphite-web,cosm0s/graphite-web,kkdk5535/graphite-web,edwardmlyte/graphite-web,afilipovich/graphite-web,cgvarela/graphite-web,goir/graphite-web,dbn/graphite-web,DanCech/graphite-web,cbowman0/graphite-web,axibase/graphite-web,afilipovich/graphite-web,gwaldo/graphite-web,bmhatfield/graphite-web,EinsamHauer/graphite-web-iow,krux/graphite-web,graphite-project/graphite-web,markolson/graphite-web,cybem/graphite-web-iow,cosm0s/graphite-web,bmhatfield/graphite-web,mcoolive/graphite-web,Skyscanner/graphite-web,Krylon360/vimeo-graphite-web,esnet/graphite-web,JeanFred/graphite-web,blacked/graphite-web,g76r/graphite-web,brutasse/graphite-web,deniszh/graphite-web,mleinart/graphite-web,axibase/graphite-web,brutasse/graphite-web,phreakocious/graphite-web,cosm0s/graphite-web,Talkdesk/graphite-web,axibase/graphite-web,ZelunZhang/graphite-web,blacked/graphite-web,deniszh/graphite-web,lfckop/graphite-web,JeanFred/graphite-web,drax68/graphite-web,gwaldo/graphite-web,cloudant/graphite-web,deniszh/graphite-web,cosm0s/graphite-web,penpen/graphite-web,Talkdesk/graphite-web,redice/graphite-web,krux/graphi
te-web,goir/graphite-web,drax68/graphite-web,dbn/graphite-web,gwaldo/graphite-web,obfuscurity/graphite-web,Aloomaio/graphite-web,cbowman0/graphite-web,zBMNForks/graphite-web,graphite-server/graphite-web,cosm0s/graphite-web,criteo-forks/graphite-web,kkdk5535/graphite-web,goir/graphite-web,evernote/graphite-web,EinsamHauer/graphite-web-iow,dhtech/graphite-web,slackhappy/graphite-web,Skyscanner/graphite-web,piotr1212/graphite-web,lfckop/graphite-web,synedge/graphite-web,goir/graphite-web,dbn/graphite-web,nkhuyu/graphite-web,pcn/graphite-web,pcn/graphite-web,Skyscanner/graphite-web,blacked/graphite-web,dbn/graphite-web,section-io/graphite-web,nkhuyu/graphite-web,cbowman0/graphite-web,dhtech/graphite-web,drax68/graphite-web,AICIDNN/graphite-web,disqus/graphite-web,cgvarela/graphite-web,MjAbuz/graphite-web,Krylon360/evernote-graphite-web,graphite-project/graphite-web,phreakocious/graphite-web,mleinart/graphite-web,bmhatfield/graphite-web,redice/graphite-web,lfckop/graphite-web,redice/graphite-web,lyft/graphite-web,mcoolive/graphite-web,penpen/graphite-web,Aloomaio/graphite-web,lfckop/graphite-web,atnak/graphite-web,ZelunZhang/graphite-web,phreakocious/graphite-web,nkhuyu/graphite-web,markolson/graphite-web,cgvarela/graphite-web,Skyscanner/graphite-web,pu239ppy/graphite-web,criteo-forks/graphite-web,brutasse/graphite-web,bbc/graphite-web,markolson/graphite-web,Aloomaio/graphite-web,cloudant/graphite-web | webapp/graphite/render/grammar.py | webapp/graphite/render/grammar.py | from graphite.thirdparty.pyparsing import *
ParserElement.enablePackrat()
grammar = Forward()
expression = Forward()
# Literals
intNumber = Combine(
Optional('-') + Word(nums)
)('integer')
floatNumber = Combine(
Optional('-') + Word(nums) + Literal('.') + Word(nums)
)('float')
aString = quotedString('string')
# Use lookahead to match only numbers in a list (can't remember why this is necessary)
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
(floatNumber + afterNumber) |
(intNumber + afterNumber)
)('number')
boolean = Group(
CaselessKeyword("true") |
CaselessKeyword("false")
)('boolean')
# Function calls
arg = Group(
boolean |
number |
aString |
expression
)
args = delimitedList(arg)('args')
func = Word(alphas+'_', alphanums+'_')('func')
call = Group(
func + Literal('(').suppress() +
args + Literal(')').suppress()
)('call')
# Metric pattern (aka. pathExpression)
validMetricChars = alphanums + r'''!#$%&"'*+-.:;<=>?@[\]^_`|~'''
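# The leading name chunk below is Optional so that a path expression may
# begin directly with a braces alternative such as {host1,host2}.cpu (per
# the commit subject).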
pathExpression = Combine(
Optional(Word(validMetricChars)) +
Combine(
ZeroOrMore(
Group(
Literal('{') +
Word(validMetricChars + ',') +
Literal('}') + Optional( Word(validMetricChars) )
)
)
)
)('pathExpression')
expression << Group(call | pathExpression)('expression')
grammar << expression
def enableDebug():
for name,obj in globals().items():
try:
obj.setName(name)
obj.setDebug(True)
except:
pass
| from graphite.thirdparty.pyparsing import *
ParserElement.enablePackrat()
grammar = Forward()
expression = Forward()
# Literals
intNumber = Combine(
Optional('-') + Word(nums)
)('integer')
floatNumber = Combine(
Optional('-') + Word(nums) + Literal('.') + Word(nums)
)('float')
aString = quotedString('string')
# Use lookahead to match only numbers in a list (can't remember why this is necessary)
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
(floatNumber + afterNumber) |
(intNumber + afterNumber)
)('number')
boolean = Group(
CaselessKeyword("true") |
CaselessKeyword("false")
)('boolean')
# Function calls
arg = Group(
boolean |
number |
aString |
expression
)
args = delimitedList(arg)('args')
func = Word(alphas+'_', alphanums+'_')('func')
call = Group(
func + Literal('(').suppress() +
args + Literal(')').suppress()
)('call')
# Metric pattern (aka. pathExpression)
validMetricChars = alphanums + r'''!#$%&"'*+-.:;<=>?@[\]^_`|~'''
pathExpression = Combine(
Word(validMetricChars) +
Combine(
ZeroOrMore(
Group(
Literal('{') +
Word(validMetricChars + ',') +
Literal('}') + Optional( Word(validMetricChars) )
)
)
)
)('pathExpression')
expression << Group(call | pathExpression)('expression')
grammar << expression
def enableDebug():
for name,obj in globals().items():
try:
obj.setName(name)
obj.setDebug(True)
except:
pass
| apache-2.0 | Python |
1fdd1f306d45f6aeee91c7f016f7c37286ee3b3b | clear signing | gpg/gpgme,gpg/gpgme,gpg/gpgme,gpg/gpgme,gpg/gpgme,gpg/gpgme,gpg/gpgme | lang/python/examples/howto/clear-sign-file.py | lang/python/examples/howto/clear-sign-file.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
# Copyright (C) 2018 Ben McGinnes <ben@gnupg.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU
# Lesser General Public Licensefor more details.
#
# You should have received a copy of the GNU General Public License and the GNU
# Lesser General Public along with this program; if not, see
# <http://www.gnu.org/licenses/>.
import gpg
import sys
"""
Clear-signs a file with a specified key. If entering both the key and the
filename on the command line, the key must be entered first.
"""
if len(sys.argv) > 3:
logrus = sys.argv[1]
filename = " ".join(sys.argv[2:])
elif len(sys.argv) == 3:
logrus = sys.argv[1]
filename = sys.argv[2]
elif len(sys.argv) == 2:
logrus = sys.argv[1]
filename = input("Enter the path and filename to sign: ")
else:
logrus = input("Enter the fingerprint or key ID to sign with: ")
filename = input("Enter the path and filename to sign: ")
with open(filename, "rb") as f:
text = f.read()
key = list(gpg.Context().keylist(pattern=logrus))
with gpg.Context(armor=True, signers=key) as c:
signed_data, result = c.sign(text, mode=gpg.constants.sig.mode.CLEAR)
with open("{0}.asc".format(filename), "wb") as f:
f.write(signed_data)
| lgpl-2.1 | Python |
|
c199892e07217f164ae694d510b206bfa771090b | remove unused import | sigma/vmw.vco | src/vmw/vco/components.py | src/vmw/vco/components.py | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from zope.interface import interface, declarations
from zope.interface.adapter import AdapterRegistry
# The following is taken almots as-is from twisted.python.components
_vcoRegistry = AdapterRegistry()
def _registered(registry, required, provided):
"""
Return the adapter factory for the given parameters in the given
registry, or None if there is not one.
"""
return registry.get(required).selfImplied.get(provided, {}).get('')
def registerAdapter(adapterFactory, origInterface, *interfaceClasses):
"""Register an adapter class.
An adapter class is expected to implement the given interface, by
adapting instances implementing 'origInterface'. An adapter class's
__init__ method should accept one parameter, an instance implementing
'origInterface'.
"""
assert interfaceClasses, "You need to pass an Interface"
# deal with class->interface adapters:
if not isinstance(origInterface, interface.InterfaceClass):
origInterface = declarations.implementedBy(origInterface)
for interfaceClass in interfaceClasses:
factory = _registered(_vcoRegistry, origInterface, interfaceClass)
if factory is not None:
raise ValueError("an adapter (%s) was already registered." % (factory, ))
for interfaceClass in interfaceClasses:
_vcoRegistry.register([origInterface], interfaceClass, '', adapterFactory)
# add global adapter lookup hook for our newly created registry
def _hook(iface, ob, lookup=_vcoRegistry.lookup1):
factory = lookup(declarations.providedBy(ob), iface)
if factory is None:
return None
else:
return factory(ob)
interface.adapter_hooks.append(_hook)
| # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from zope.interface import interface, implements, declarations
from zope.interface.adapter import AdapterRegistry
# The following is taken almots as-is from twisted.python.components
_vcoRegistry = AdapterRegistry()
def _registered(registry, required, provided):
"""
Return the adapter factory for the given parameters in the given
registry, or None if there is not one.
"""
return registry.get(required).selfImplied.get(provided, {}).get('')
def registerAdapter(adapterFactory, origInterface, *interfaceClasses):
"""Register an adapter class.
An adapter class is expected to implement the given interface, by
adapting instances implementing 'origInterface'. An adapter class's
__init__ method should accept one parameter, an instance implementing
'origInterface'.
"""
assert interfaceClasses, "You need to pass an Interface"
# deal with class->interface adapters:
if not isinstance(origInterface, interface.InterfaceClass):
origInterface = declarations.implementedBy(origInterface)
for interfaceClass in interfaceClasses:
factory = _registered(_vcoRegistry, origInterface, interfaceClass)
if factory is not None:
raise ValueError("an adapter (%s) was already registered." % (factory, ))
for interfaceClass in interfaceClasses:
_vcoRegistry.register([origInterface], interfaceClass, '', adapterFactory)
# add global adapter lookup hook for our newly created registry
def _hook(iface, ob, lookup=_vcoRegistry.lookup1):
factory = lookup(declarations.providedBy(ob), iface)
if factory is None:
return None
else:
return factory(ob)
interface.adapter_hooks.append(_hook)
| mit | Python |
f76c06acf52094cd13cdf7087fa8d3914c2b992a | Add interactive module | lnls-fac/sirius | sirius/interactive.py | sirius/interactive.py |
"""Interactive sirius module
Use this module to define variables and functions to be globally available when
using
'from sirius.interactive import *'
"""
from pyaccel.interactive import *
import sirius.SI_V07 as si_model
import sirius.BO_V901 as bo_model
__all__ = [name for name in dir() if not name.startswith('_')]
print('Names defined in sirius.interactive: ' + ', '.join(__all__) + '.\n')
| mit | Python |
|
f1e6926f964877acc3bfe0d667a199861b431ed7 | add test_xadc | mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware | software/test_xadc.py | software/test_xadc.py | def main(wb):
wb.open()
regs = wb.regs
# # #
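# The XADC returns 12-bit samples (0..4095): temperature uses the 7-series
# transfer function raw*503.975/4096 - 273.15, and the supply-voltage
# channels map full scale to 3 V.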
print("temperature: %fยฐC" %(regs.xadc_temperature.read()*503.975/4096 - 273.15))
print("vccint: %fV" %(regs.xadc_vccint.read()/4096*3))
print("vccaux: %fV" %(regs.xadc_vccaux.read()/4096*3))
print("vccbram: %fV" %(regs.xadc_vccbram.read()/4096*3))
# # #
wb.close()
| bsd-2-clause | Python |
|
c2dab85f24e648c66daae847f19b605271ed858b | Add more threader tests | ergl/pysellus,cgvarela/pysellus,angelsanz/pysellus,Pysellus/pysellus | spec/threader_spec.py | spec/threader_spec.py | import queue
from functools import partial
from doublex import Spy, Mock
from expects import expect, be
from doublex_expects import have_been_called
from pysellus import threader
with description('the threader module'):
with it('should create as many threads as the sum of len(values) of the supplied dict'):
a_stream = Mock()
another_stream = Mock()
foo = Spy()
a_function = Spy()
another_function = Spy()
streams_to_observers = {
a_stream: [a_function],
another_stream: [a_function, another_function]
}
threads = threader.build_threads(streams_to_observers, foo)
expected_length = sum(
len(fn_list) for fn_list in streams_to_observers.values()
)
expect(len(threads)).to(be(expected_length))
with it('should create a properly initialized thread'):
stream = Mock()
observer = Spy()
target = Spy().target_function
thread = threader.make_thread(target, stream, observer)
thread.start()
thread.join()
expect(target).to(have_been_called)
with it('should call the target function with the correct arguments'):
stream = Mock()
observer = Spy()
que = queue.Queue(maxsize=1)
# Return a list with the stream and the observer fn
target_function = lambda s, o: [s, o]
# We can't return from a function running in another thread
# so we put the value on a queue
target_wrapper = lambda q, s, o: q.put(target_function(s, o))
# We define a partial so that we don't have to pass the queue
# as a parameter to make_thread
target_partial = partial(target_wrapper, que)
thread = threader.make_thread(target_partial, stream, observer)
thread.start()
thread.join()
result = que.get()
# result is [stream, observer]
expect(result[0]).to(be(stream))
expect(result[1]).to(be(observer))
| from expects import expect, be
from doublex import Spy, Mock
from pysellus import threader
with description('the threader module'):
with it('should create as many threads as keys * values in the supplied dict'):
a_stream = Mock()
another_stream = Mock()
foo = Spy()
a_function = Spy()
another_function = Spy()
streams_to_observers = {
a_stream: [a_function],
another_stream: [a_function, another_function]
}
threads = threader.build_threads(streams_to_observers, foo)
expected_length = sum(
len(fn_list) for fn_list in streams_to_observers.values()
)
expect(len(threads)).to(be(expected_length))
| mit | Python |
e6a137026ff9b84814199517a452d354e121a476 | Create quiz_3.py | eliecer11/Uip-prog3 | laboratorios/quiz_3.py | laboratorios/quiz_3.py | #dado un intervalo de tiempo en segundos, calcular los segundos restantes
#corresponden para convertirse exactamente en minutos. Este programa debe
#funcionar para 5 oportunidades.
chance = 0
segundos_restantes = 0
while chance < 5:
segundos = int (input("Introduzca sus segundos:"))
chance +=1
if segundos / 60:
segundos_restantes =60-segundos%60
print (segundos_restantes)
| mit | Python |
|
90851f4fdb1eb69bb3d6d953974d9a399d60bd13 | add browser_render.py | EscapeLife/web_crawler | 5.动态内容/5.browser_render.py | 5.动态内容/5.browser_render.py | #!/usr/bin/env python
# coding:utf-8
# Class-based implementation of the rendering approach
# The timer tracks the waiting time and quits the event loop once the deadline is reached.
# Otherwise, the event loop would keep running forever whenever a network problem occurs.
import re
import csv
import time
import lxml.html
try:
from PySide.QtGui import QApplication
from PySide.QtCore import QUrl, QEventLoop, QTimer
from PySide.QtWebKit import QWebView
except ImportError:
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl, QEventLoop, QTimer
from PyQt4.QtWebKit import QWebView
class BrowserRender(QWebView):
def __init__(self, display=True):
self.app = QApplication([])
QWebView.__init__(self)
if display:
# show the browser
self.show()
def open(self, url, timeout=60):
"""Wait for download to complete and return result"""
loop = QEventLoop()
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(loop.quit)
self.loadFinished.connect(loop.quit)
self.load(QUrl(url))
timer.start(timeout * 1000)
loop.exec_()
if timer.isActive():
# downloaded successfully
timer.stop()
return self.html()
else:
# timed out
print 'Request timed out:', url
def html(self):
"""Shortcut to return the current HTML"""
return self.page().mainFrame().toHtml()
def find(self, pattern):
"""Find all elements that match the pattern"""
return self.page().mainFrame().findAllElements(pattern)
def attr(self, pattern, name, value):
"""Set attribute for matching elements"""
for e in self.find(pattern):
e.setAttribute(name, value)
def text(self, pattern, value):
"""Set attribute for matching elements"""
for e in self.find(pattern):
e.setPlainText(value)
def click(self, pattern):
"""Click matching elements"""
for e in self.find(pattern):
e.evaluateJavaScript("this.click()")
def wait_load(self, pattern, timeout=60):
"""Wait for this pattern to be found in webpage and return matches"""
deadline = time.time() + timeout
while time.time() < deadline:
self.app.processEvents()
matches = self.find(pattern)
if matches:
return matches
print 'Wait load timed out'
def main():
br = BrowserRender()
br.open('http://example.webscraping.com/search')
br.attr('#search_term', 'value', '.')
br.text('#page_size option:checked', '1000')
br.click('#search')
elements = br.wait_load('#results a')
writer = csv.writer(open('countries.csv', 'w'))
for country in [e.toPlainText().strip() for e in elements]:
writer.writerow([country])
if __name__ == '__main__':
main()
| mit | Python |
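The bounded wait in open() above hinges on two signals quitting one event loop: loadFinished on success, the single-shot timer on timeout. The same skeleton in isolation (a sketch assuming PyQt4 is installed):
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QEventLoop, QTimer
app = QApplication([])
loop = QEventLoop()
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(loop.quit)  # deadline path out of the loop
timer.start(1000)                 # one-second timeout
loop.exec_()                      # returns as soon as the timer fires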
|
58ac46511964ca1dd3de25d2b6053eb785e3e281 | Add outlier detection util script. | Alexx-G/openface,nmabhi/Webface,Alexx-G/openface,nmabhi/Webface,xinfang/face-recognize,francisleunggie/openface,cmusatyalab/openface,nmabhi/Webface,Alexx-G/openface,francisleunggie/openface,nhzandi/openface,xinfang/face-recognize,Alexx-G/openface,nmabhi/Webface,xinfang/face-recognize,cmusatyalab/openface,francisleunggie/openface,cmusatyalab/openface,nhzandi/openface,nhzandi/openface | util/detect-outliers.py | util/detect-outliers.py | #!/usr/bin/env python2
#
# Detect outlier faces (not of the same person) in a directory
# of aligned images.
# Brandon Amos
# 2016/02/14
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import itertools
import os
import glob
import numpy as np
np.set_printoptions(precision=2)
from sklearn.covariance import EllipticEnvelope
from sklearn.metrics.pairwise import euclidean_distances
import openface
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
openfaceModelDir = os.path.join(modelDir, 'openface')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--threshold', type=float, default=0.9)
parser.add_argument('directory')
args = parser.parse_args()
net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)
reps = []
paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))
for imgPath in paths:
reps.append(net.forwardPath(imgPath))
mean = np.mean(reps, axis=0)
dists = euclidean_distances(reps, mean)
outliers = []
for path, dist in zip(paths, dists):
dist = dist.take(0)
if dist > args.threshold:
outliers.append((path, dist))
print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths)))
for path, dist in outliers:
print(" + {} ({:0.2f})".format(path, dist))
if __name__ == '__main__':
main()
| apache-2.0 | Python |
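The outlier rule in the script above is simply a distance-to-centroid threshold. The same idea on plain vectors, with hypothetical data standing in for face embeddings:
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

reps = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [2.0, 2.0]])  # last row is far from the rest
mean = np.mean(reps, axis=0)
dists = euclidean_distances(reps, mean.reshape(1, -1))
outliers = [i for i, d in enumerate(dists) if d.take(0) > 0.9]
print(outliers)  # -> [3]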
|
dad13d26aaf58ea186891e138ac9a10153363c8a | add vicon data extraction | scauglog/brain_record_toolbox,scauglog/brain_record_toolbox,scauglog/brain_record_toolbox | script_r448_vicon_process.py | script_r448_vicon_process.py | import pickle
import signal_processing as sig_proc
dir_name = '../data/r448/r448_131022_rH/'
img_ext = '.png'
save_img = True
show = False
save_obj = True
sp = sig_proc.Signal_processing(save_img, show, img_ext)
filename='p0_3RW05'
file_events=sp.load_csv(dir_name+filename+'_EVENTS.csv')
file_analog=sp.load_csv(dir_name+filename+'_ANALOG.csv')
data=sp.vicon_extract(file_events)
data=sp.vicon_extract(file_analog,data)
data=sp.synch_vicon_with_TDT(data)
print('\n\n#################')
print('#### END ####') | mit | Python |
|
b46e7e31c5476c48e2a53d5a632354700d554174 | Add test_html_fetchers | Samuel-L/cli-ws,Samuel-L/cli-ws | tests/test_html_fetchers.py | tests/test_html_fetchers.py | import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import mock
from web_scraper.core import html_fetchers
def mocked_requests_get(*args, **kwargs):
"""this method will be used by the mock to replace requests.get"""
class MockResponse:
def __init__(self, html, status_code):
self.html = html
self.status_code = status_code
def text(self):
return self.html
def status_code(self):
return self.status_code
if args[0] == 'http://example.com/':
return MockResponse(200, (200, 'html'))
return MockResponse(404, (404, 'Not Found'))
class TestHtmlFetchersMethods(unittest.TestCase):
@mock.patch('web_scraper.core.html_fetchers.requests.get', side_effect=mocked_requests_get)
def test_fetch_html_document_200(self, mock_get):
"""fetch_html_document should return 200 and html"""
response = html_fetchers.fetch_html_document('http://example.com/') # response = tuple, MockResponse object
status_code = response[0][0]
html = response[0][1]
self.assertEqual((status_code, html), (200, 'html'))
@mock.patch('web_scraper.core.html_fetchers.requests.get', side_effect=mocked_requests_get)
def test_fetch_html_document_404(self, mock_get):
"""fetch_html_document should return 404 and 'Not Found'"""
response = html_fetchers.fetch_html_document('http://example.com/nonexistentpath') # response = tuple, MockResponse object.
status_code = response[0][0]
html = response[0][1]
self.assertEqual((status_code, html), (404, 'Not Found'))
if __name__ == '__main__':
unittest.main() | mit | Python |
|
b4f8e8d38636a52d3d4b199fdc670ff93eca33f6 | Add prototype for filters module. | rladeira/mltils | mltils/filters.py | mltils/filters.py | # pylint: disable=missing-docstring, invalid-name, import-error
class VarianceFilter(object):
pass
class SimilarityFilter(object):
pass
class CorrelationFilter(object):
pass
| mit | Python |
|
b0f5c33461d08325581cc0ad272c7f2b39b8dc66 | Fix typo. | ahaberlie/MetPy,Unidata/MetPy,dopplershift/MetPy,ahaberlie/MetPy,ahill818/MetPy,dopplershift/MetPy,deeplycloudy/MetPy,ShawnMurd/MetPy,Unidata/MetPy,jrleeman/MetPy,jrleeman/MetPy | metpy/calc/__init__.py | metpy/calc/__init__.py | import basic
from basic import *
__all__ = []
__all__.extend(basic.__all__)
| import basic
from basic import *
__all__ == []
__all__.extend(basic.__all__)
| bsd-3-clause | Python |
167712a6640abca106bbcd50daf5dc22ba90083d | Fix log formatting | ngonzalvez/sentry,alexm92/sentry,BuildingLink/sentry,kevinastone/sentry,TedaLIEz/sentry,JTCunning/sentry,JTCunning/sentry,imankulov/sentry,pauloschilling/sentry,boneyao/sentry,ifduyue/sentry,wujuguang/sentry,camilonova/sentry,ifduyue/sentry,BuildingLink/sentry,felixbuenemann/sentry,1tush/sentry,ewdurbin/sentry,kevinlondon/sentry,1tush/sentry,mvaled/sentry,fotinakis/sentry,gg7/sentry,JamesMura/sentry,mitsuhiko/sentry,argonemyth/sentry,jean/sentry,llonchj/sentry,Kryz/sentry,ifduyue/sentry,jean/sentry,fotinakis/sentry,nicholasserra/sentry,BuildingLink/sentry,camilonova/sentry,Natim/sentry,pauloschilling/sentry,looker/sentry,Kryz/sentry,fuziontech/sentry,songyi199111/sentry,fotinakis/sentry,ifduyue/sentry,kevinlondon/sentry,mvaled/sentry,vperron/sentry,looker/sentry,vperron/sentry,Natim/sentry,mvaled/sentry,wong2/sentry,fuziontech/sentry,zenefits/sentry,TedaLIEz/sentry,gencer/sentry,alexm92/sentry,zenefits/sentry,ngonzalvez/sentry,fuziontech/sentry,daevaorn/sentry,TedaLIEz/sentry,hongliang5623/sentry,gencer/sentry,zenefits/sentry,Natim/sentry,mvaled/sentry,BayanGroup/sentry,mvaled/sentry,daevaorn/sentry,argonemyth/sentry,kevinlondon/sentry,gencer/sentry,imankulov/sentry,kevinastone/sentry,looker/sentry,jean/sentry,songyi199111/sentry,drcapulet/sentry,camilonova/sentry,ngonzalvez/sentry,korealerts1/sentry,1tush/sentry,JackDanger/sentry,JamesMura/sentry,hongliang5623/sentry,korealerts1/sentry,daevaorn/sentry,JackDanger/sentry,drcapulet/sentry,beeftornado/sentry,JamesMura/sentry,mvaled/sentry,gencer/sentry,songyi199111/sentry,llonchj/sentry,drcapulet/sentry,argonemyth/sentry,Kryz/sentry,JamesMura/sentry,JackDanger/sentry,alexm92/sentry,hongliang5623/sentry,wujuguang/sentry,BayanGroup/sentry,wujuguang/sentry,imankulov/sentry,boneyao/sentry,zenefits/sentry,pauloschilling/sentry,ewdurbin/sentry,daevaorn/sentry,felixbuenemann/sentry,jean/sentry,fotinakis/sentry,vperron/sentry,llonchj/sentry,gg7/sentry,looker/sentry,wong2/sentry,kevinastone/sentry,gencer/sentry,beeftornado/sentry,beeftornado/sentry,nicholasserra/sentry,ifduyue/sentry,ewdurbin/sentry,BuildingLink/sentry,BayanGroup/sentry,JamesMura/sentry,gg7/sentry,jokey2k/sentry,looker/sentry,jean/sentry,JTCunning/sentry,mitsuhiko/sentry,wong2/sentry,BuildingLink/sentry,jokey2k/sentry,jokey2k/sentry,felixbuenemann/sentry,zenefits/sentry,korealerts1/sentry,nicholasserra/sentry,boneyao/sentry | src/sentry/tasks/email.py | src/sentry/tasks/email.py | """
sentry.tasks.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.core.mail import get_connection
from sentry.tasks.base import instrumented_task
logger = logging.getLogger(__name__)
def _get_user_from_email(group, email):
from sentry.models import Project, User
# TODO(dcramer): we should encode the userid in emails so we can avoid this
for user in User.objects.filter(email__iexact=email):
# Make sure that the user actually has access to this project
if group.project not in Project.objects.get_for_user(
team=group.team, user=user):
logger.warning('User %r does not have access to group %r', user, group)
continue
return user
@instrumented_task(
name='sentry.tasks.email.process_inbound_email',
queue='email')
def process_inbound_email(mailfrom, group_id, payload):
"""
"""
from sentry.models import Event, Group
from sentry.web.forms import NewNoteForm
try:
group = Group.objects.select_related('project', 'team').get(pk=group_id)
except Group.DoesNotExist:
logger.warning('Group does not exist: %d', group_id)
return
user = _get_user_from_email(group, mailfrom)
if user is None:
logger.warning('Inbound email from unknown address: %s', mailfrom)
return
event = group.get_latest_event() or Event()
Event.objects.bind_nodes([event], 'data')
event.group = group
event.project = group.project
form = NewNoteForm({'text': payload})
if form.is_valid():
form.save(event, user)
@instrumented_task(
name='sentry.tasks.email.send_email',
queue='email')
def send_email(message):
connection = get_connection()
connection.send_messages([message])
| """
sentry.tasks.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.core.mail import get_connection
from sentry.tasks.base import instrumented_task
logger = logging.getLogger(__name__)
def _get_user_from_email(group, email):
from sentry.models import Project, User
# TODO(dcramer): we should encode the userid in emails so we can avoid this
for user in User.objects.filter(email__iexact=email):
# Make sure that the user actually has access to this project
if group.project not in Project.objects.get_for_user(
team=group.team, user=user):
logger.warning('User %r does not have access to group %r', (user, group))
continue
return user
@instrumented_task(
name='sentry.tasks.email.process_inbound_email',
queue='email')
def process_inbound_email(mailfrom, group_id, payload):
"""
"""
from sentry.models import Event, Group
from sentry.web.forms import NewNoteForm
try:
group = Group.objects.select_related('project', 'team').get(pk=group_id)
except Group.DoesNotExist:
logger.warning('Group does not exist: %d', group_id)
return
user = _get_user_from_email(group, mailfrom)
if user is None:
logger.warning('Inbound email from unknown address: %s', mailfrom)
return
event = group.get_latest_event() or Event()
Event.objects.bind_nodes([event], 'data')
event.group = group
event.project = group.project
form = NewNoteForm({'text': payload})
if form.is_valid():
form.save(event, user)
@instrumented_task(
name='sentry.tasks.email.send_email',
queue='email')
def send_email(message):
connection = get_connection()
connection.send_messages([message])
| bsd-3-clause | Python |
5e1c58db69adad25307d23c240b905eaf68e1671 | Add fade animation | bitoffdev/perkins-blues | src/fade_animation.py | src/fade_animation.py | import animation, colorsys
def colorunpack(color):
color = int(color)
return ((color >> 16) / 255,
((color >> 8) & 255) / 0xff,
(color & 0xff) / 0xff)
def colorpack(color):
return sum(int(color[i] * 0xff) << (16 - 8*i) for i in range(3))
class FadeAnimation(animation.Animation):
"""
animation fades relevant section of light strip solid between two colors
for the duration of the animation
"""
def __init__(self, start_time, stop_time, start_pos, stop_pos, start_color, stop_color):
"""
:param start_time: seconds since the epoch to start animation
:param stop_time: seconds since the epoch to stop animation
:param start_pos: number from 0 to 1 indicating start on strip
:param stop_pos: number from 0 to 1 indicating stop on strip
:param start_color: initial 24-bit integer RGB color
:param stop_color: final 24-bit integer RGB color
"""
self.set_start_time(start_time)
self.set_stop_time(stop_time)
self.set_start_pos(start_pos)
self.set_stop_pos(stop_pos)
self.__start_hsv = colorsys.rgb_to_hsv(*colorunpack(start_color))
self.__stop_hsv = colorsys.rgb_to_hsv(*colorunpack(stop_color))
def get_color(self, time, pos):
"""
:param time: current time as seconds since the epoch
:param pos: position from 0 to 1 to get color for
:return: 24-bit integer RGB color
"""
lerp = (time - self.get_start_time()) \
/ (self.get_stop_time() - self.get_start_time())
lerp = max(0, min(1, lerp))
curr = (self.__start_hsv[0] + (self.__stop_hsv[0]-self.__start_hsv[0])*lerp,
self.__start_hsv[1] + (self.__stop_hsv[1]-self.__start_hsv[1])*lerp,
self.__start_hsv[2] + (self.__stop_hsv[2]-self.__start_hsv[2])*lerp)
return colorpack(colorsys.hsv_to_rgb(*curr))
| mit | Python |
|
f537abe2ff1826a9decd9dace5597cbc4f7f318b | Add 1.6 | Dzionek95/Algorithms,Dzionek95/Algorithms,Dzionek95/Algorithms | 1_arrays_hashtables/string_compression.py | 1_arrays_hashtables/string_compression.py | def compress(string):
count_array = []
element_count = 1
for index, character in enumerate(string[1:]):
print(character, string[index])
if string[index] == character:
element_count = element_count + 1
else:
count_array.append(element_count)
element_count = 1
count_array.append(element_count)
# if len(count_array) == len(string):
# return string
compressed_string = ''
string_position = 0
print(count_array)
for numbers in count_array:
if(numbers != 1):
compressed_string += str(numbers)
compressed_string += string[string_position]
string_position += numbers
return compressed_string
if __name__ == '__main__':
print(compress('aafbbcdaaaaa'))
| mit | Python |
|
3e15f6d64ccbb1f98ff64323a25304db662a45ba | Add nice_number function to format decimals to english | linuxipho/mycroft-core,aatchison/mycroft-core,Dark5ide/mycroft-core,linuxipho/mycroft-core,aatchison/mycroft-core,Dark5ide/mycroft-core,forslund/mycroft-core,MycroftAI/mycroft-core,forslund/mycroft-core,MycroftAI/mycroft-core | mycroft/util/format.py | mycroft/util/format.py |
# -*- coding: iso-8859-15 -*-
# Copyright 2017 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import math
FRACTION_STRING_EN = {
2: 'half',
3: 'third',
4: 'forth',
5: 'fifth',
6: 'sixth',
7: 'seventh',
8: 'eigth',
9: 'ninth',
10: 'tenth',
11: 'eleventh',
12: 'twelveth',
13: 'thirteenth',
14: 'fourteenth',
15: 'fifteenth',
16: 'sixteenth',
17: 'seventeenth',
18: 'eighteenth',
19: 'nineteenth',
20: 'twentyith'
}
def nice_number(number, lang="en-us", speech=True, denominators=None):
"""Format a float to human readable functions
This function formats a float to human understandable functions. Like
4.5 becomes 4 and a half for speech and 4 1/2 for text
Args:
number (str): the float to format
lang (str): the code for the language text is in
speech (bool): to return speech representation or text representation
denominators (iter of ints): denominators to use, default [1 .. 20]
Returns:
(str): The formatted string.
"""
result = convert_number(number, denominators)
if not result:
return str(round(number, 3))
if not speech:
if num == 0:
return str(whole)
else:
return '{} {}/{}'.format(whole, num, den)
lang_lower = str(lang).lower()
if lang_lower.startswith("en"):
return nice_number_en(result)
# TODO: Normalization for other languages
return str(number)
def nice_number_en(result):
""" English conversion for nice_number """
whole, num, den = result
if num == 0:
return str(whole)
den_str = FRACTION_STRING_EN[den]
if whole == 0:
return_string = '{} {}'.format(num, den_str)
else:
return_string = '{} and {} {}'.format(whole, num, den_str)
if num > 1:
return_string += 's'
return return_string
def convert_number(number, denominators):
""" Convert floats to mixed fractions """
int_number = int(number)
if int_number == number:
return int_number, 0, 1
frac_number = abs(number - int_number)
if not denominators:
denominators = range(1, 21)
for denominator in denominators:
numerator = abs(frac_number) * denominator
if (abs(numerator - round(numerator)) < 0.01):
break
else:
return None
return int_number, int(round(numerator)), denominator
| apache-2.0 | Python |
|
296efcc28e19fc76371496881a546f1ca52dc622 | add nagios check for iembot availability | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | nagios/check_iembot.py | nagios/check_iembot.py | """Ensure iembot is up."""
import sys
import requests
def main():
"""Go Main Go."""
req = requests.get('http://iembot:9004/room/kdmx.xml')
if req.status_code == 200:
print("OK - len(kdmx.xml) is %s" % (len(req.content), ))
return 0
print("CRITICAL - /room/kdmx.xml returned code %s" % (req.status_code, ))
return 2
if __name__ == '__main__':
sys.exit(main())
| mit | Python |
|
1d0c0741f1605f3786a752288161c679ab271ea2 | Add a utility file for aggregating decorators | bdyetton/prettychart,abought/osf.io,chrisseto/osf.io,sbt9uc/osf.io,DanielSBrown/osf.io,laurenrevere/osf.io,fabianvf/osf.io,MerlinZhang/osf.io,rdhyee/osf.io,alexschiller/osf.io,haoyuchen1992/osf.io,asanfilippo7/osf.io,billyhunt/osf.io,monikagrabowska/osf.io,dplorimer/osf,jnayak1/osf.io,jolene-esposito/osf.io,ckc6cz/osf.io,caseyrollins/osf.io,fabianvf/osf.io,mluo613/osf.io,saradbowman/osf.io,wearpants/osf.io,aaxelb/osf.io,GaryKriebel/osf.io,jnayak1/osf.io,pattisdr/osf.io,ckc6cz/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io,caneruguz/osf.io,doublebits/osf.io,TomHeatwole/osf.io,laurenrevere/osf.io,asanfilippo7/osf.io,samchrisinger/osf.io,RomanZWang/osf.io,binoculars/osf.io,zamattiac/osf.io,SSJohns/osf.io,ticklemepierce/osf.io,zkraime/osf.io,jeffreyliu3230/osf.io,revanthkolli/osf.io,sloria/osf.io,jnayak1/osf.io,zachjanicki/osf.io,brandonPurvis/osf.io,KAsante95/osf.io,mfraezz/osf.io,wearpants/osf.io,emetsger/osf.io,abought/osf.io,fabianvf/osf.io,HalcyonChimera/osf.io,kch8qx/osf.io,haoyuchen1992/osf.io,asanfilippo7/osf.io,monikagrabowska/osf.io,ticklemepierce/osf.io,baylee-d/osf.io,caseyrollins/osf.io,alexschiller/osf.io,cwisecarver/osf.io,adlius/osf.io,emetsger/osf.io,fabianvf/osf.io,SSJohns/osf.io,jmcarp/osf.io,kch8qx/osf.io,chrisseto/osf.io,emetsger/osf.io,samanehsan/osf.io,felliott/osf.io,leb2dg/osf.io,mattclark/osf.io,TomBaxter/osf.io,mfraezz/osf.io,amyshi188/osf.io,zamattiac/osf.io,cslzchen/osf.io,zamattiac/osf.io,aaxelb/osf.io,barbour-em/osf.io,KAsante95/osf.io,brianjgeiger/osf.io,MerlinZhang/osf.io,abought/osf.io,brandonPurvis/osf.io,chrisseto/osf.io,lyndsysimon/osf.io,cldershem/osf.io,barbour-em/osf.io,baylee-d/osf.io,emetsger/osf.io,felliott/osf.io,dplorimer/osf,GageGaskins/osf.io,kwierman/osf.io,Ghalko/osf.io,caneruguz/osf.io,danielneis/osf.io,chennan47/osf.io,TomHeatwole/osf.io,cwisecarver/osf.io,haoyuchen1992/osf.io,caseyrollins/osf.io,zkraime/osf.io,mluke93/osf.io,arpitar/osf.io,erinspace/osf.io,reinaH/osf.io,zamattiac/osf.io,zkraime/osf.io,mluo613/osf.io,ZobairAlijan/osf.io,bdyetton/prettychart,mluo613/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,ckc6cz/osf.io,acshi/osf.io,acshi/osf.io,jinluyuan/osf.io,samanehsan/osf.io,cwisecarver/osf.io,GageGaskins/osf.io,jinluyuan/osf.io,rdhyee/osf.io,jnayak1/osf.io,cslzchen/osf.io,crcresearch/osf.io,monikagrabowska/osf.io,njantrania/osf.io,GageGaskins/osf.io,brandonPurvis/osf.io,caseyrygt/osf.io,aaxelb/osf.io,hmoco/osf.io,DanielSBrown/osf.io,cslzchen/osf.io,pattisdr/osf.io,hmoco/osf.io,cosenal/osf.io,acshi/osf.io,HarryRybacki/osf.io,ZobairAlijan/osf.io,reinaH/osf.io,danielneis/osf.io,jeffreyliu3230/osf.io,chennan47/osf.io,caseyrygt/osf.io,TomHeatwole/osf.io,haoyuchen1992/osf.io,mfraezz/osf.io,chennan47/osf.io,caseyrygt/osf.io,njantrania/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,petermalcolm/osf.io,kwierman/osf.io,samchrisinger/osf.io,jinluyuan/osf.io,alexschiller/osf.io,alexschiller/osf.io,acshi/osf.io,zachjanicki/osf.io,Nesiehr/osf.io,revanthkolli/osf.io,lyndsysimon/osf.io,doublebits/osf.io,alexschiller/osf.io,MerlinZhang/osf.io,bdyetton/prettychart,sbt9uc/osf.io,rdhyee/osf.io,revanthkolli/osf.io,TomBaxter/osf.io,kch8qx/osf.io,brandonPurvis/osf.io,GageGaskins/osf.io,acshi/osf.io,billyhunt/osf.io,billyhunt/osf.io,amyshi188/osf.io,DanielSBrown/osf.io,mluke93/osf.io,binoculars/osf.io,RomanZWang/osf.io,barbour-em/osf.io,Ghalko/osf.io,jolene-esposito/osf.io,ZobairAlijan/osf.io,reinaH/osf.io,felliott/osf.io,SSJohns/
osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,Ghalko/osf.io,wearpants/osf.io,leb2dg/osf.io,hmoco/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,ckc6cz/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,ticklemepierce/osf.io,doublebits/osf.io,RomanZWang/osf.io,jeffreyliu3230/osf.io,lyndsysimon/osf.io,adlius/osf.io,caseyrygt/osf.io,cosenal/osf.io,amyshi188/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,sloria/osf.io,jmcarp/osf.io,leb2dg/osf.io,petermalcolm/osf.io,caneruguz/osf.io,kwierman/osf.io,mluke93/osf.io,GageGaskins/osf.io,petermalcolm/osf.io,asanfilippo7/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,KAsante95/osf.io,dplorimer/osf,sbt9uc/osf.io,bdyetton/prettychart,Johnetordoff/osf.io,lyndsysimon/osf.io,TomBaxter/osf.io,barbour-em/osf.io,hmoco/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,TomHeatwole/osf.io,wearpants/osf.io,Johnetordoff/osf.io,sloria/osf.io,HarryRybacki/osf.io,mattclark/osf.io,mluke93/osf.io,arpitar/osf.io,jeffreyliu3230/osf.io,MerlinZhang/osf.io,mfraezz/osf.io,jolene-esposito/osf.io,revanthkolli/osf.io,felliott/osf.io,HarryRybacki/osf.io,Johnetordoff/osf.io,Ghalko/osf.io,jmcarp/osf.io,ticklemepierce/osf.io,samchrisinger/osf.io,HalcyonChimera/osf.io,KAsante95/osf.io,zachjanicki/osf.io,billyhunt/osf.io,adlius/osf.io,GaryKriebel/osf.io,jmcarp/osf.io,samanehsan/osf.io,saradbowman/osf.io,samchrisinger/osf.io,sbt9uc/osf.io,GaryKriebel/osf.io,icereval/osf.io,doublebits/osf.io,SSJohns/osf.io,amyshi188/osf.io,cldershem/osf.io,crcresearch/osf.io,jolene-esposito/osf.io,crcresearch/osf.io,danielneis/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,mattclark/osf.io,njantrania/osf.io,pattisdr/osf.io,KAsante95/osf.io,kwierman/osf.io,GaryKriebel/osf.io,erinspace/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,monikagrabowska/osf.io,brandonPurvis/osf.io,cosenal/osf.io,petermalcolm/osf.io,danielneis/osf.io,dplorimer/osf,HarryRybacki/osf.io,mluo613/osf.io,mluo613/osf.io,reinaH/osf.io,samanehsan/osf.io,arpitar/osf.io,njantrania/osf.io,kch8qx/osf.io,cldershem/osf.io,DanielSBrown/osf.io,cldershem/osf.io,rdhyee/osf.io,zachjanicki/osf.io,cosenal/osf.io,kch8qx/osf.io,arpitar/osf.io,leb2dg/osf.io,laurenrevere/osf.io,jinluyuan/osf.io,abought/osf.io,billyhunt/osf.io,zkraime/osf.io,adlius/osf.io,Nesiehr/osf.io,icereval/osf.io | website/addons/osfstorage/decorators.py | website/addons/osfstorage/decorators.py | import functools
from webargs import Arg
from webargs import core
from framework.auth.decorators import must_be_signed
from website.models import User
from framework.exceptions import HTTPError
from website.addons.osfstorage import utils
from website.project.decorators import (
must_not_be_registration, must_have_addon,
)
class JSONParser(core.Parser):
def __init__(self, data):
self._data = data
def parse(self, args):
return super(JSONParser, self).parse(args, None, ('json',))
def parse_json(self, _, name, arg):
if self._data:
return core.get_value(self._data, name, arg.multiple)
else:
return core.Missing
def path_validator(path):
return (
path.startswith('/') and
len(path.strip('/').split('/')) < 3
)
file_opt_args = {
'source': Arg({
'path': Arg(str, required=True, validate=path_validator),
'cookie': Arg(None, required=True, use=User.from_cookie, validate=lambda x: x is not None)
}),
'destination': Arg({
'path': Arg(str, required=True, validate=path_validator),
'cookie': Arg(None, required=True, use=User.from_cookie, validate=lambda x: x is not None)
})
}
def waterbutler_opt_hook(func):
@must_be_signed
@utils.handle_odm_errors
@must_not_be_registration
@must_have_addon('osfstorage', 'node')
@functools.wraps(func)
def wrapped(payload, *args, **kwargs):
kwargs.update(JSONParser(payload).parse(file_opt_args))
return func(*args, **kwargs)
return wrapped
| apache-2.0 | Python |
|
bb8e7ee023d678e68d1da3018bf6d1d3d36d55bd | Create new package (#6588) | matthiasdiener/spack,LLNL/spack,mfherbst/spack,iulian787/spack,tmerrick1/spack,krafczyk/spack,tmerrick1/spack,krafczyk/spack,tmerrick1/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,EmreAtes/spack,EmreAtes/spack,EmreAtes/spack,iulian787/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,tmerrick1/spack,iulian787/spack,mfherbst/spack,matthiasdiener/spack,krafczyk/spack,LLNL/spack,krafczyk/spack,LLNL/spack,tmerrick1/spack,EmreAtes/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,mfherbst/spack,matthiasdiener/spack,LLNL/spack | var/spack/repos/builtin/packages/perl-statistics-descriptive/package.py | var/spack/repos/builtin/packages/perl-statistics-descriptive/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlStatisticsDescriptive(PerlPackage):
"""Module of basic descriptive statistical functions."""
homepage = "http://search.cpan.org/~shlomif/Statistics-Descriptive-3.0612/lib/Statistics/Descriptive.pm"
url = "http://search.cpan.org/CPAN/authors/id/S/SH/SHLOMIF/Statistics-Descriptive-3.0612.tar.gz"
version('3.0612', 'e38cfbc1e3962d099b62a14a57a175f1')
| lgpl-2.1 | Python |
|
81bcfe4a31d8e9ea92497288c3d264755949d809 | check for some statistics on the dataset. | sanja7s/SR_Twitter | stats_about_tweet_data.py | stats_about_tweet_data.py | from collections import defaultdict
import codecs
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
F_IN = "usrs_with_more_than_20_tweets.dat"
F_OUT = "tweets_with_usrs_with_more_than_20_tweets.dat"
#f_out = "usrs_with_more_than_20_tweets.dat"
USR_TWEETS = defaultdict(int)
def plot_hist(data):
n, bins, patches = P.hist(data, bins=np.logspace(0.1, 3.5), histtype='step', log=True, label="# of tweets per user")
P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
#y = P.normpdf( bins, mu, sigma)
#l = P.plot(bins, y, 'k--', linewidth=1.5)
#
# create a histogram by providing the bin edges (unequally spaced)
#
#P.figure()
#
# now we create a cumulative histogram of the data
#
#P.grid(True)
#P.ylim(0, 1.05)
#P.legend()
P.gca().set_xscale("log")
P.show()
def filter_users(thrshld=20):
filtered_lst = []
for usr, tweets in USR_TWEETS.iteritems():
if USR_TWEETS[usr] > thrshld:
filtered_lst.append(usr)
return filtered_lst
'''
# DONE once is enough
def filter_dataset(thrshld=20):
user_tweets = tweets_per_user(F_IN)
filtered_lst = filter_users(user_tweets)
cnt_selected_tweets = 0
output_file = codecs.open(F_OUT, 'w', encoding='utf8')
with codecs.open(F_IN,'r', encoding='utf8') as input_file:
for line in input_file:
line_splt = line.split()
usr = line_splt[0]
if usr in filtered_lst:
cnt_selected_tweets += 1
output_file.write(line)
output_file.close()
input_file.close()
print "Filtered dataset for users with more than: ", thrshld, " tweets."
print "New number of tweets: ", cnt_selected_tweets
print "New number of users: ", len(filtered_lst)
'''
def tweets_per_user():
cnt_all_tweets = 0
global USR_TWEETS
with codecs.open(F_IN,'r', encoding='utf8') as input_file:
# the code loops through the input, collects tweets for each user into a dict
for line in input_file:
cnt_all_tweets += 1
line = line.split()
user = line[0]
USR_TWEETS[user] += 1
print "Read ENG tweets: ", cnt_all_tweets, " from: ", len(USR_TWEETS.keys()), " distinct users."
max_tweets = max(USR_TWEETS.values())
print "MAX tweets ", max_tweets, " has/ve the user/s ", \
[usr for usr, tweets in USR_TWEETS.iteritems() if USR_TWEETS[usr] == max_tweets]
input_file.close()
def plot_hist_usr_tweets():
usr_tweets = tweets_per_user()
plot_hist(usr_tweets.values())
def filter_dataset_double_usr_filter(thrshld=20):
filtered_lst = filter_users(thrshld)
cnt_selected_tweets = 0
output_file = codecs.open(F_OUT, 'w', encoding='utf8')
with codecs.open(F_IN,'r', encoding='utf8') as input_file:
for line in input_file:
line_splt = line.split()
usr = line_splt[0]
usr2 = line_splt[1]
if usr in filtered_lst and usr2 in filtered_lst:
cnt_selected_tweets += 1
output_file.write(line)
output_file.close()
input_file.close()
print "Filtered dataset for users with more than: ", thrshld, " tweets."
print "New number of tweets: ", cnt_selected_tweets
print "New number of users: ", len(filtered_lst)
#plot_hist_usr_tweets()
#filter_dataset()
tweets_per_user()
#print len(filter_users())
filter_dataset_double_usr_filter() | mit | Python |
|
d7c0525a1b62bbe6b8425c0bb2dda0e1fad680b8 | Create enforce.py | devzero-xyz/Andromeda-Plugins | enforce.py | enforce.py | """This plugin allows a user to enforce modes set on channels"""
"""e.g. enforcing +o on nick"""
"""Requires admin"""
from utils import add_cmd, add_handler
import utils
from admin import deop
name = "enforce"
cmds = ["enforce"]
def main(irc):
if name not in irc.plugins.keys():
irc.plugins[name] = {}
@add_cmd
def enforce(irc, event, args):
"""[<channel>] <modes> <nick>
Enforces a mode to <nick> in the current channel if no channel argument is given.
"""
message = event.arguments[0].split()
try:
if irc.is_channel(message[1]):
unrecognised_modes = []
unset_modes = []
set_modes = []
mode_diff = "+"
for pos, mode in enumerate(message[2]):
if mode in utils.argmodes["set"] or mode in utils.argmodes["unset"] or mode in ["+", "-"]:
pass
else: # What on earth is that mode?
unrecognised_modes.append(mode)
for mode in message[2]:
if mode == "+":
mode_diff = "+"
elif mode == "-":
mode_diff = "-"
else:
if mode_diff == "+":
if mode in unset_modes:
irc.reply(event, "This mode {} is already set and could not be unset for {}".format(mode, message[3]))
else:
set_modes.append(mode)
elif mode_diff == "-": # else but who cares?
if mode in set_modes:
irc.reply(event, "This mode {} is already set and could not be set for {}".format(mode, message[3]))
else:
unset_modes.append(mode)
if unrecognised_modes:
irc.reply(event, "I could not recognise these modes: {}".format("".join(unrecognised_modes)))
else:
if len(message) >= 4:
if not "enforce" in irc.channels[message[1]]:
irc.channels[message[1]]["enforce"] = {}
irc.channels[message[1]]["enforce"][message[3]] = {
"set": set_modes or "",
"unset": unset_modes or ""
}
else:
irc.reply(event, "You didn't specify a nick to enforce modes to")
except IndexError:
irc.reply(event, utils.gethelp("enforce"))
def on_mode(irc, conn, event):
modes = utils.split_modes(event.arguments)
irc.notice("BWBellairs", str(modes))
if "enforce" in irc.channels[event.target].keys():
for mode in modes:
subject = mode.split()[1]
mode_type = mode.split()[0]
if subject in irc.channels[event.target]["enforce"].keys():
modes_set = irc.channels[event.target]["enforce"][subject]
if mode_type[0:2] == "+o" and mode_type[1] in modes_set["unset"]:
irc.notice("BWBellairs", "deop him!!!")
add_handler(on_mode, name)
| mit | Python |
|
12f7dddcbe8c7c2160bf8de8f7a9c3082b950003 | Create longest-harmonious-subsequence.py | jaredkoontz/leetcode,kamyu104/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode | Python/longest-harmonious-subsequence.py | Python/longest-harmonious-subsequence.py | # Time: O(n)
# Space: O(n)
import collections
class Solution(object):
def findLHS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
lookup = collections.defaultdict(int)
result = 0
for num in nums:
lookup[num] += 1
for diff in [-1, 1]:
if (num + diff) in lookup:
result = max(result, lookup[num] + lookup[num + diff])
return result
| mit | Python |
|
d7cc3d6590d1d6d46bdf780b93e76ea6aa50334d | Create peak-index-in-a-mountain-array.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015 | Python/peak-index-in-a-mountain-array.py | Python/peak-index-in-a-mountain-array.py | # Time: O(logn)
# Space: O(1)
# Let's call an array A a mountain if the following properties hold:
#
# A.length >= 3
# There exists some 0 < i < A.length - 1
# such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]
# Given an array that is definitely a mountain,
# return any i such that
# A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1].
#
# Example 1:
#
# Input: [0,1,0]
# Output: 1
# Example 2:
#
# Input: [0,2,1,0]
# Output: 1
# Note:
#
# 3 <= A.length <= 10000
# 0 <= A[i] <= 10^6
# A is a mountain, as defined above.
class Solution(object):
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
left, right = 0, len(A)
while left < right:
mid = left + (right-left)//2
if A[mid] > A[mid+1]:
right = mid
else:
left = mid+1
return left
| mit | Python |
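A quick check of the binary search above against the second example — the invariant is that the peak always stays inside [left, right]:
print(Solution().peakIndexInMountainArray([0, 2, 1, 0]))  # -> 1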
|
64eab4beaf4e00d47423ea027ec6f40129ee2e95 | Create execi-3.py | rafa-impacta/Exercicio | execi-3.py | execi-3.py | n1 = int(input("Enter a value: "))
if n1 < 0:
print (n1 * -1)
elif n1 > 10:
n2 = int(input("Enter another value: "))
print (n1 - n2)
else:
print (n1/5.0)
| apache-2.0 | Python |
|
7dce21cc8fa3b81e150ed6586db8ca80cd537fc7 | Add compat module to test package | piotr-rusin/spam-lists | test/compat.py | test/compat.py | # -*- coding: utf-8 -*-
'''
A common module for compatibility related imports and
definitions used during testing
'''
from __future__ import unicode_literals
import unittest
from six import assertCountEqual, PY2
try:
from unittest.mock import Mock, MagicMock, patch # @NoMove
except ImportError:
from mock import Mock, MagicMock, patch # @NoMove @UnusedImport
class Py2TestCase(unittest.TestCase):
def assertCountEqual(self, expected_sequence, actual_sequence):
return assertCountEqual(self, expected_sequence, actual_sequence)
if PY2:
unittest.TestCase = Py2TestCase
| mit | Python |
|
9a97847419ad569b1f9f3d302507aca8544944e2 | test file | dcrosta/mongo-disco,sajal/MongoDisco,mongodb/mongo-disco,10genNYUITP/MongoDisco,johntut/MongoDisco | test_scheme.py | test_scheme.py | import unittest
import scheme_mongo
class TestScheme(unittest.TestCase):
def runTest(self):
mongo_uri = "mongodb://localhost/test.in"
wrapper = scheme_mongo.open(mongo_uri)
assert wrapper
for result in wrapper:
print result
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
fc95c998dc8c3caee3e0a4590b96c9ed7e0321a7 | add a test suite for Division | srittau/python-htmlgen | test_htmlgen/block.py | test_htmlgen/block.py | from unittest import TestCase
from asserts import assert_equal
from htmlgen import Division
class DivisionTest(TestCase):
def test_render(self):
div = Division()
div.append("Test")
assert_equal([b"<div>", b"Test", b"</div>"], list(iter(div)))
| mit | Python |
|
e4980879f0f4a0d223cccc99a486fb62cbe5807f | change models.py | fwpz/WeiPython,PegasusWang/WeiPython | physics/models.py | physics/models.py | from django.db import models
class Student(models.Model):
"""Student Info"""
stu_id = models.CharField(u'学号', max_length=30, primary_key=True)
name = models.CharField(u'姓名', max_length=30)
password = models.CharField(u'密码', max_length=30)
def __unicode__(self):
return '{stu_id} {name}'.format(stu_id=self.stu_id, name=self.name)
class Teacher(models.Model):
"""Teacher Info"""
name = models.CharField(u'ๅงๅ', max_length=30)
def __unicode__(self):
return self.name
class Question(models.Model):
"""Question Info"""
title = models.TextField(u'题目')
content = models.TextField(u'选项')
answer = models.CharField(u'答案', max_length=1)
def __unicode__(self):
return self.title
class Notification(models.Model):
"""Notification Info"""
title = models.TextField(u'通知标题')
content = models.TextField(u'通知内容')
time = models.DateField(u'通知时间')
def __unicode__(self):
return self.title
| from django.db import models
class Student(models.Model):
"""Student Info"""
stu_id = models.CharField(u'学号', max_length=30, primary_key=True)
name = models.CharField(u'姓名', max_length=30)
password = models.CharField(u'密码', max_length=30)
def __unicode__(self):
return '{stu_id} {name}'.format(stu_id=self.stu_id, name=self.name)
class Teacher(models.Model):
"""Teacher Info"""
name = models.CharField(u'ๅงๅ', max_length=30)
def __unicode__(self):
return self.name
class Question(models.Model):
"""Question Info"""
title = models.TextField()
content = models.TextField()
answer = models.CharField(max_length=1)
def __unicode__(self):
return self.title
class Notification(models.Model):
"""Notification Info"""
title = models.TextField()
content = models.TextField()
time = models.DateField()
def __unicode__(self):
return self.title
| mit | Python |
964d1f97df600308b23b6a91b9de8811795509a4 | Add a test for the @cachit decorator. | cccfran/sympy,shipci/sympy,wyom/sympy,Titan-C/sympy,ahhda/sympy,meghana1995/sympy,wanglongqi/sympy,pandeyadarsh/sympy,vipulroxx/sympy,jbbskinny/sympy,jaimahajan1997/sympy,Arafatk/sympy,grevutiu-gabriel/sympy,Gadal/sympy,jerli/sympy,mcdaniel67/sympy,Mitchkoens/sympy,wyom/sympy,oliverlee/sympy,ga7g08/sympy,tovrstra/sympy,hrashk/sympy,Curious72/sympy,kaushik94/sympy,Davidjohnwilson/sympy,kumarkrishna/sympy,oliverlee/sympy,jbbskinny/sympy,MridulS/sympy,atreyv/sympy,AunShiLord/sympy,jerli/sympy,oliverlee/sympy,cswiercz/sympy,moble/sympy,kaichogami/sympy,lindsayad/sympy,wanglongqi/sympy,abhiii5459/sympy,madan96/sympy,toolforger/sympy,jamesblunt/sympy,bukzor/sympy,pbrady/sympy,drufat/sympy,AunShiLord/sympy,atreyv/sympy,sahmed95/sympy,drufat/sympy,Curious72/sympy,skidzo/sympy,dqnykamp/sympy,iamutkarshtiwari/sympy,dqnykamp/sympy,rahuldan/sympy,Sumith1896/sympy,Vishluck/sympy,kumarkrishna/sympy,debugger22/sympy,Davidjohnwilson/sympy,lidavidm/sympy,Shaswat27/sympy,moble/sympy,kaichogami/sympy,mafiya69/sympy,ChristinaZografou/sympy,kaichogami/sympy,jaimahajan1997/sympy,ahhda/sympy,srjoglekar246/sympy,kumarkrishna/sympy,ChristinaZografou/sympy,vipulroxx/sympy,skirpichev/omg,sahmed95/sympy,fperez/sympy,MridulS/sympy,Designist/sympy,sahilshekhawat/sympy,abhiii5459/sympy,kmacinnis/sympy,liangjiaxing/sympy,emon10005/sympy,minrk/sympy,ryanGT/sympy,farhaanbukhsh/sympy,garvitr/sympy,ahhda/sympy,sunny94/temp,hargup/sympy,wyom/sympy,abhiii5459/sympy,cswiercz/sympy,hargup/sympy,farhaanbukhsh/sympy,beni55/sympy,chaffra/sympy,kaushik94/sympy,amitjamadagni/sympy,jerli/sympy,AkademieOlympia/sympy,Curious72/sympy,MechCoder/sympy,grevutiu-gabriel/sympy,aktech/sympy,souravsingh/sympy,yukoba/sympy,maniteja123/sympy,kevalds51/sympy,iamutkarshtiwari/sympy,chaffra/sympy,lindsayad/sympy,Mitchkoens/sympy,VaibhavAgarwalVA/sympy,toolforger/sympy,hazelnusse/sympy-old,abloomston/sympy,asm666/sympy,lidavidm/sympy,abloomston/sympy,VaibhavAgarwalVA/sympy,kevalds51/sympy,maniteja123/sympy,Gadal/sympy,atsao72/sympy,abloomston/sympy,pandeyadarsh/sympy,yashsharan/sympy,maniteja123/sympy,vipulroxx/sympy,yashsharan/sympy,drufat/sympy,yashsharan/sympy,liangjiaxing/sympy,pernici/sympy,souravsingh/sympy,Arafatk/sympy,hazelnusse/sympy-old,emon10005/sympy,skidzo/sympy,debugger22/sympy,kaushik94/sympy,postvakje/sympy,pandeyadarsh/sympy,iamutkarshtiwari/sympy,rahuldan/sympy,kmacinnis/sympy,kevalds51/sympy,farhaanbukhsh/sympy,Sumith1896/sympy,debugger22/sympy,asm666/sympy,VaibhavAgarwalVA/sympy,sampadsaha5/sympy,chaffra/sympy,sahilshekhawat/sympy,Shaswat27/sympy,atreyv/sympy,bukzor/sympy,saurabhjn76/sympy,ga7g08/sympy,sampadsaha5/sympy,Designist/sympy,lindsayad/sympy,jbaayen/sympy,meghana1995/sympy,sahilshekhawat/sympy,AkademieOlympia/sympy,liangjiaxing/sympy,yukoba/sympy,beni55/sympy,madan96/sympy,asm666/sympy,sunny94/temp,hrashk/sympy,atsao72/sympy,ChristinaZografou/sympy,AkademieOlympia/sympy,Arafatk/sympy,aktech/sympy,yukoba/sympy,grevutiu-gabriel/sympy,bukzor/sympy,MechCoder/sympy,lidavidm/sympy,jaimahajan1997/sympy,cccfran/sympy,sunny94/temp,kmacinnis/sympy,souravsingh/sympy,Gadal/sympy,meghana1995/sympy,toolforger/sympy,shikil/sympy,amitjamadagni/sympy,saurabhjn76/sympy,dqnykamp/sympy,Titan-C/sympy,jamesblunt/sympy,pbrady/sympy,skidzo/sympy,emon10005/sympy,minrk/sympy,mcdaniel67/sympy,MechCoder/sympy,shipci/sympy,AunShiLord/sympy,moble/sympy,aktech/sympy,saurabhjn76/sympy,cccfran/sympy,postvakje/sympy,atsao72/sympy,sahmed95/sympy,shipci/sympy,shikil/sympy,sa
mpadsaha5/sympy,ga7g08/sympy,cswiercz/sympy,garvitr/sympy,Vishluck/sympy,mattpap/sympy-polys,postvakje/sympy,hrashk/sympy,Designist/sympy,madan96/sympy,wanglongqi/sympy,Vishluck/sympy,Shaswat27/sympy,Mitchkoens/sympy,shikil/sympy,mafiya69/sympy,Sumith1896/sympy,beni55/sympy,rahuldan/sympy,MridulS/sympy,mafiya69/sympy,Titan-C/sympy,jamesblunt/sympy,Davidjohnwilson/sympy,jbbskinny/sympy,mcdaniel67/sympy,KevinGoodsell/sympy,flacjacket/sympy,pbrady/sympy,diofant/diofant,hargup/sympy,garvitr/sympy | sympy/core/tests/test_cache.py | sympy/core/tests/test_cache.py | from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
| bsd-3-clause | Python |
|
a8ddae9343683ca69067eecbece5ecff6d4e5d1d | Add myStrom switch platform | PetePriority/home-assistant,jaharkes/home-assistant,ct-23/home-assistant,oandrew/home-assistant,florianholzapfel/home-assistant,hmronline/home-assistant,florianholzapfel/home-assistant,morphis/home-assistant,ct-23/home-assistant,joopert/home-assistant,Zac-HD/home-assistant,kyvinh/home-assistant,shaftoe/home-assistant,open-homeautomation/home-assistant,MartinHjelmare/home-assistant,coteyr/home-assistant,dmeulen/home-assistant,hmronline/home-assistant,sffjunkie/home-assistant,hmronline/home-assistant,jamespcole/home-assistant,philipbl/home-assistant,Teagan42/home-assistant,emilhetty/home-assistant,justyns/home-assistant,persandstrom/home-assistant,hmronline/home-assistant,kyvinh/home-assistant,deisi/home-assistant,devdelay/home-assistant,shaftoe/home-assistant,GenericStudent/home-assistant,nnic/home-assistant,jnewland/home-assistant,LinuxChristian/home-assistant,ct-23/home-assistant,oandrew/home-assistant,florianholzapfel/home-assistant,deisi/home-assistant,Julian/home-assistant,deisi/home-assistant,sdague/home-assistant,nevercast/home-assistant,Duoxilian/home-assistant,bdfoster/blumate,miniconfig/home-assistant,luxus/home-assistant,ct-23/home-assistant,sdague/home-assistant,MungoRae/home-assistant,leoc/home-assistant,GenericStudent/home-assistant,keerts/home-assistant,hexxter/home-assistant,devdelay/home-assistant,JshWright/home-assistant,bdfoster/blumate,sfam/home-assistant,varunr047/homefile,PetePriority/home-assistant,tboyce021/home-assistant,deisi/home-assistant,nnic/home-assistant,srcLurker/home-assistant,nugget/home-assistant,aequitas/home-assistant,soldag/home-assistant,postlund/home-assistant,tinloaf/home-assistant,philipbl/home-assistant,instantchow/home-assistant,tboyce021/home-assistant,leoc/home-assistant,ewandor/home-assistant,rohitranjan1991/home-assistant,ma314smith/home-assistant,devdelay/home-assistant,Zac-HD/home-assistant,mezz64/home-assistant,devdelay/home-assistant,ct-23/home-assistant,Cinntax/home-assistant,shaftoe/home-assistant,home-assistant/home-assistant,happyleavesaoc/home-assistant,mikaelboman/home-assistant,Duoxilian/home-assistant,ewandor/home-assistant,eagleamon/home-assistant,DavidLP/home-assistant,robjohnson189/home-assistant,Teagan42/home-assistant,rohitranjan1991/home-assistant,ewandor/home-assistant,xifle/home-assistant,jawilson/home-assistant,ma314smith/home-assistant,robbiet480/home-assistant,open-homeautomation/home-assistant,balloob/home-assistant,persandstrom/home-assistant,deisi/home-assistant,Danielhiversen/home-assistant,bdfoster/blumate,molobrakos/home-assistant,jabesq/home-assistant,coteyr/home-assistant,xifle/home-assistant,alexmogavero/home-assistant,morphis/home-assistant,JshWright/home-assistant,kyvinh/home-assistant,justyns/home-assistant,keerts/home-assistant,alexmogavero/home-assistant,nugget/home-assistant,florianholzapfel/home-assistant,sffjunkie/home-assistant,sfam/home-assistant,HydrelioxGitHub/home-assistant,partofthething/home-assistant,Theb-1/home-assistant,lukas-hetzenecker/home-assistant,jaharkes/home-assistant,leppa/home-assistant,Danielhiversen/home-assistant,happyleavesaoc/home-assistant,jnewland/home-assistant,tinloaf/home-assistant,betrisey/home-assistant,tchellomello/home-assistant,jamespcole/home-assistant,morphis/home-assistant,eagleamon/home-assistant,instantchow/home-assistant,leoc/home-assistant,auduny/home-assistant,robjohnson189/home-assistant,stefan-jonasson/home-assistant,Duoxilian/home-assistant,happyleavesaoc/home-assistant,Smart-Torvy/torvy
-home-assistant,Zyell/home-assistant,mikaelboman/home-assistant,hexxter/home-assistant,keerts/home-assistant,mikaelboman/home-assistant,sfam/home-assistant,partofthething/home-assistant,shaftoe/home-assistant,tinloaf/home-assistant,adrienbrault/home-assistant,tboyce1/home-assistant,mKeRix/home-assistant,qedi-r/home-assistant,alexmogavero/home-assistant,Julian/home-assistant,MartinHjelmare/home-assistant,varunr047/homefile,oandrew/home-assistant,nkgilley/home-assistant,home-assistant/home-assistant,sffjunkie/home-assistant,tboyce1/home-assistant,fbradyirl/home-assistant,mKeRix/home-assistant,jabesq/home-assistant,aoakeson/home-assistant,srcLurker/home-assistant,hexxter/home-assistant,sffjunkie/home-assistant,titilambert/home-assistant,robjohnson189/home-assistant,Cinntax/home-assistant,miniconfig/home-assistant,emilhetty/home-assistant,Smart-Torvy/torvy-home-assistant,DavidLP/home-assistant,betrisey/home-assistant,Zyell/home-assistant,jabesq/home-assistant,aequitas/home-assistant,emilhetty/home-assistant,dmeulen/home-assistant,fbradyirl/home-assistant,toddeye/home-assistant,Smart-Torvy/torvy-home-assistant,molobrakos/home-assistant,varunr047/homefile,pschmitt/home-assistant,open-homeautomation/home-assistant,MungoRae/home-assistant,postlund/home-assistant,jamespcole/home-assistant,jnewland/home-assistant,LinuxChristian/home-assistant,bdfoster/blumate,mikaelboman/home-assistant,open-homeautomation/home-assistant,eagleamon/home-assistant,Zac-HD/home-assistant,stefan-jonasson/home-assistant,nevercast/home-assistant,robjohnson189/home-assistant,stefan-jonasson/home-assistant,auduny/home-assistant,justyns/home-assistant,morphis/home-assistant,HydrelioxGitHub/home-assistant,PetePriority/home-assistant,coteyr/home-assistant,dmeulen/home-assistant,jaharkes/home-assistant,MungoRae/home-assistant,jaharkes/home-assistant,caiuspb/home-assistant,Zyell/home-assistant,nnic/home-assistant,ma314smith/home-assistant,kennedyshead/home-assistant,MungoRae/home-assistant,miniconfig/home-assistant,joopert/home-assistant,HydrelioxGitHub/home-assistant,miniconfig/home-assistant,FreekingDean/home-assistant,fbradyirl/home-assistant,Theb-1/home-assistant,betrisey/home-assistant,keerts/home-assistant,tboyce1/home-assistant,sander76/home-assistant,nevercast/home-assistant,DavidLP/home-assistant,hexxter/home-assistant,aronsky/home-assistant,oandrew/home-assistant,mikaelboman/home-assistant,emilhetty/home-assistant,xifle/home-assistant,JshWright/home-assistant,molobrakos/home-assistant,sffjunkie/home-assistant,emilhetty/home-assistant,w1ll1am23/home-assistant,caiuspb/home-assistant,JshWright/home-assistant,aequitas/home-assistant,varunr047/homefile,robbiet480/home-assistant,luxus/home-assistant,pschmitt/home-assistant,adrienbrault/home-assistant,Julian/home-assistant,alexmogavero/home-assistant,LinuxChristian/home-assistant,philipbl/home-assistant,MungoRae/home-assistant,balloob/home-assistant,nkgilley/home-assistant,Theb-1/home-assistant,Julian/home-assistant,auduny/home-assistant,happyleavesaoc/home-assistant,rohitranjan1991/home-assistant,Duoxilian/home-assistant,balloob/home-assistant,soldag/home-assistant,kennedyshead/home-assistant,Smart-Torvy/torvy-home-assistant,xifle/home-assistant,instantchow/home-assistant,dmeulen/home-assistant,caiuspb/home-assistant,eagleamon/home-assistant,aronsky/home-assistant,srcLurker/home-assistant,qedi-r/home-assistant,sander76/home-assistant,mKeRix/home-assistant,bdfoster/blumate,turbokongen/home-assistant,ma314smith/home-assistant,leppa/home-assistant,jawilson/home-assistant,Zac-HD/ho
me-assistant,mKeRix/home-assistant,LinuxChristian/home-assistant,LinuxChristian/home-assistant,tchellomello/home-assistant,FreekingDean/home-assistant,turbokongen/home-assistant,aoakeson/home-assistant,varunr047/homefile,mezz64/home-assistant,persandstrom/home-assistant,titilambert/home-assistant,betrisey/home-assistant,lukas-hetzenecker/home-assistant,hmronline/home-assistant,leoc/home-assistant,aoakeson/home-assistant,w1ll1am23/home-assistant,tboyce1/home-assistant,toddeye/home-assistant,philipbl/home-assistant,MartinHjelmare/home-assistant,stefan-jonasson/home-assistant,nugget/home-assistant,luxus/home-assistant,kyvinh/home-assistant,srcLurker/home-assistant | homeassistant/components/switch/mystrom.py | homeassistant/components/switch/mystrom.py | """
homeassistant.components.switch.mystrom
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for myStrom switches.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.mystrom/
"""
import logging
import requests
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_UNKNOWN
DEFAULT_NAME = 'myStrom Switch'
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument, too-many-function-args
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Find and return myStrom switches. """
resource = config.get('resource')
if resource is None:
_LOGGER.error('Missing required variable: resource')
return False
try:
requests.get(resource, timeout=10)
except requests.exceptions.MissingSchema:
_LOGGER.error("Missing resource or schema in configuration. "
"Add http:// to your URL.")
return False
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device. "
"Please check the IP address in the configuration file.")
return False
add_devices([MyStromSwitch(
config.get('name', DEFAULT_NAME),
config.get('resource'))])
class MyStromSwitch(SwitchDevice):
""" Represents a myStrom switch. """
def __init__(self, name, resource):
self._state = STATE_UNKNOWN
self._name = name
self._resource = resource
self.consumption = 0
@property
def name(self):
""" The name of the switch. """
return self._name
@property
def is_on(self):
""" True if switch is on. """
return self._state
@property
def current_power_mwh(self):
""" Current power consumption in mwh. """
return self.consumption
def turn_on(self, **kwargs):
""" Turn the switch on. """
request = requests.get('{}/relay'.format(self._resource),
params={'state': '1'},
timeout=10)
if request.status_code == 200:
self._state = True
else:
_LOGGER.error("Can't turn on %s. Is device offline?",
self._resource)
def turn_off(self, **kwargs):
""" Turn the switch off. """
request = requests.get('{}/relay'.format(self._resource),
params={'state': '0'},
timeout=10)
if request.status_code == 200:
self._state = False
else:
_LOGGER.error("Can't turn off %s. Is device offline?",
self._resource)
def update(self):
""" Gets the latest data from REST API and updates the state. """
try:
request = requests.get('{}/report'.format(self._resource),
timeout=10)
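            # The /report endpoint returns JSON containing at least the
            # "relay" and "power" keys consumed below.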
            data = request.json()
            self._state = data['relay'] is True
            self.consumption = data['power']
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device '%s'. Is device offline?",
self._resource)
| mit | Python |
|
fbf5ecffb4249e7f881f53f30625a47a6e779592 | Create selective_array_reversing.py | Kunalpod/codewars,Kunalpod/codewars | selective_array_reversing.py | selective_array_reversing.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Selective Array Reversing
#Problem level: 6 kyu
def sel_reverse(arr,l):
    li = []
    if not l:
        return arr
    for i in range(0, len(arr), l):
        # Python slicing clamps at the end of the list, so the final
        # partial chunk reverses correctly without a special case.
        li += arr[i:i+l][::-1]
    return li
| mit | Python |
|
afe8e16be43b5e66df0f7bf14832f77009aab151 | Create __init__.py | lucasphbs/TradeVIABTC | oauth/__init__.py | oauth/__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by bu on 2017-05-10
"""
from __future__ import unicode_literals
import json as complex_json
import requests
from utils import verify_sign
from utils import get_sign
class RequestClient(object):
__headers = {
'Content-Type': 'application/json; charset=utf-8',
'Accept': 'application/json',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
}
    def __init__(self, access_id, secret_key, headers=None):
        self.access_id = access_id
        self.secret_key = secret_key
        # Copy the class-level defaults so one instance's header mutations
        # never leak into other instances.
        self.headers = dict(self.__headers)
        self.headers.update(headers or {})
def set_authorization(self, params):
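        # get_sign() (imported from utils) derives the AUTHORIZATION header
        # value from the full parameter set and the account's secret key.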
params['access_id'] = self.access_id
self.headers['access_id'] = self.access_id
self.headers['AUTHORIZATION'] = get_sign(params, self.secret_key)
    def request(self, method, url, params=None, data='', json=None):
        # None defaults avoid the shared mutable-default pitfall: these
        # dicts are mutated below (update()/set_authorization()), so a
        # fresh one must be created on every call.
        params = {} if params is None else params
        json = {} if json is None else json
        method = method.upper()
if method == 'GET':
self.set_authorization(params)
result = requests.request('GET', url, params=params, headers=self.headers)
else:
if data:
json.update(complex_json.loads(data))
self.set_authorization(json)
result = requests.request(method, url, json=json, headers=self.headers)
return result
class OAuthClient(object):
def __init__(self, request):
self.request = request
self._body = dict()
self._authorization = ''
@property
def body(self):
raise NotImplementedError('extract body')
@property
def authorization(self):
raise NotImplementedError('authorization')
def verify_request(self, secret_key):
return verify_sign(self.body, secret_key, self.authorization)
class FlaskOAuthClient(OAuthClient):
@property
def body(self):
if self._body:
return self._body
if self.request.method == 'GET':
self._body = self.request.args.to_dict()
elif self.request.is_json:
self._body = self.request.json
access_id = self.request.headers.get('ACCESS_ID')
if access_id:
self._body['access_id'] = access_id
return self._body
@property
def authorization(self):
if self._authorization:
return self._authorization
self._authorization = self.request.headers['AUTHORIZATION']
        return self._authorization
| apache-2.0 | Python |
|
a3bbd175ef5640843cb16b0166b462ffaed25242 | standardize logging interface for fs-drift | parallel-fs-utils/fs-drift,bengland2/fsstress,parallel-fs-utils/fs-drift,bengland2/fsstress | fsd_log.py | fsd_log.py | import logging
# standardize use of logging module in fs-drift
def start_log(prefix):
log = logging.getLogger(prefix)
h = logging.StreamHandler()
log_format = prefix + ' %(asctime)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(log_format)
h.setFormatter(formatter)
log.addHandler(h)
log.setLevel(logging.DEBUG)
return log
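# Example usage (illustrative):
#   log = start_log('fs-drift')
#   log.info('starting workload')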
| apache-2.0 | Python |
|
52e71001b7e775daaaaf42280ebe06c31291b595 | Add a simplemeshtest variant where all AJ packets of one node are always dropped | freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut | tests/failmeshtest.py | tests/failmeshtest.py | #!/usr/bin/env python
from twisted.internet import reactor
from mesh import Mesh, MeshNode, packet_type, ATTEMPT_JOIN
import sys
NUMNODES = 5
NUMPACKETS = 10
DELAY = 0.1
nodes = []
# We're optimists
success = True
class TestMeshNode(MeshNode):
nodes = 1
def __init__ (self, name, mesh):
MeshNode.__init__(self, name, mesh)
def node_connected(self):
MeshNode.node_connected(self)
print "Connected"
def newNode (self, data):
MeshNode.newNode (self, data)
print "node0 - Added " + data
self.nodes += 1
if self.nodes == NUMNODES - 1:
print "Everybody who could joined"
for x in xrange(0, NUMPACKETS):
                reactor.callLater(DELAY * x, (lambda y: self.pushInput(str(y) + "\n")), x)
def leftNode (self, data):
MeshNode.leftNode (self, data)
print data.rstrip() + " left"
reactor.stop()
class FailMeshNode (MeshNode):
def __init__ (self, name, mesh):
MeshNode.__init__(self, name, mesh)
def sendPacket (self, data):
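        # Drop every ATTEMPT_JOIN packet so this node can never join the
        # mesh, while all other traffic is forwarded normally.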
if packet_type(data) != ATTEMPT_JOIN:
MeshNode.sendPacket(self, data)
class TestMesh(Mesh):
expected = {}
done = 0
def gotOutput(self, node, sender, data):
global success
if self.expected.get(node) == None:
self.expected[node] = 0
if (self.expected.get(node, int(data)) != int(data)):
print "Got " + data.rstrip() + " instead of " + \
str(self.expected[node]) + " from " + node.name
success = False
reactor.crash()
if not sender in node.peers:
print "Sender " + sender + " not in node peers"
success = False
reactor.crash()
self.expected[node] = int(data) + 1
if self.expected[node] == 10:
self.done += 1
if self.done == NUMNODES - 2:
for x in self.nodes:
x.stats()
self.nodes[-2].disconnect()
m = TestMesh()
n = TestMeshNode("node0", m)
nodes.append(n)
m.addMeshNode(n)
for x in xrange(1, NUMNODES - 1):
nodes.append(m.addNode("node" + str(x)))
x = NUMNODES - 1
n = FailMeshNode("node" + str(x), m)
nodes.append(n)
m.addMeshNode(n)
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 30%
# packet loss. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.30)
def timeout():
global success
print "TIMEOUT!"
success = False
reactor.crash()
reactor.callLater(60, timeout)
reactor.run()
if not success:
print "FAILED"
sys.exit(-1)
print "SUCCESS"
| lgpl-2.1 | Python |
|
57fc053939702f4baf04604a9226873c98526ae5 | Add test for Moniker | openlawlibrary/pygls,openlawlibrary/pygls,openlawlibrary/pygls | tests/lsp/test_moniker.py | tests/lsp/test_moniker.py | ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import unittest
from typing import List, Optional
from pygls.lsp.methods import TEXT_DOCUMENT_MONIKER
from pygls.lsp.types import (Moniker, MonikerKind, MonikerOptions, MonikerParams, Position,
TextDocumentIdentifier, UniquenessLevel)
from ..conftest import CALL_TIMEOUT, ClientServer
class TestMoniker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client_server = ClientServer()
cls.client, cls.server = cls.client_server
@cls.server.feature(
TEXT_DOCUMENT_MONIKER,
MonikerOptions(),
)
def f(params: MonikerParams) -> Optional[List[Moniker]]:
if params.text_document.uri == 'file://return.list':
return [
Moniker(
scheme='test_scheme',
identifier='test_identifier',
unique=UniquenessLevel.Global,
kind=MonikerKind.Local,
),
]
else:
return None
cls.client_server.start()
@classmethod
def tearDownClass(cls):
cls.client_server.stop()
def test_capabilities(self):
capabilities = self.server.server_capabilities
assert capabilities.moniker_provider
def test_moniker_return_list(self):
response = self.client.lsp.send_request(
TEXT_DOCUMENT_MONIKER,
MonikerParams(
text_document=TextDocumentIdentifier(uri='file://return.list'),
position=Position(line=0, character=0),
)
).result(timeout=CALL_TIMEOUT)
assert response
assert response[0]['scheme'] == 'test_scheme'
assert response[0]['identifier'] == 'test_identifier'
assert response[0]['unique'] == 'global'
assert response[0]['kind'] == 'local'
    def test_moniker_return_none(self):
response = self.client.lsp.send_request(
TEXT_DOCUMENT_MONIKER,
MonikerParams(
text_document=TextDocumentIdentifier(uri='file://return.none'),
position=Position(line=0, character=0),
)
).result(timeout=CALL_TIMEOUT)
assert response is None
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
20c4df8c61ee1f625ebd77c8613fc470a3e87438 | add another lazy function | chaleaoch/jianshu_repo,chaleaoch/jianshu_repo | lazy_function/another_lazy_class.py | lazy_function/another_lazy_class.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class lazy_property(object):
def __init__(self, func, name=None, doc=None):
self._func = func
self._name = name or func.func_name
self.__doc__ = doc or func.__doc__
def __get__(self, obj, objtype=None):
if obj is None:
return self
value = self._func(obj)
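        # Cache the result on the instance: lazy_property is a non-data
        # descriptor (it defines no __set__), so after this setattr the
        # instance attribute shadows the descriptor and __get__ is never
        # called again for this object.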
setattr(obj, self._name, value)
return value
class BaseRequest(object):
def form(self):
return 123
form = lazy_property(form)
bb = BaseRequest()
print bb.form
print bb.form
bb = BaseRequest()
print bb.form
print bb.form | mit | Python |
|
1d8fccf6943adf40c77d5d2df002330719dcfcd1 | test for S3Sync | aws-quickstart/taskcat,aws-quickstart/taskcat,aws-quickstart/taskcat | tests/test_s3_sync.py | tests/test_s3_sync.py | import os
import unittest
from pathlib import Path
import mock
from taskcat._s3_sync import S3Sync
class TestS3Sync(unittest.TestCase):
def test_init(self):
m_s3_client = mock.Mock()
m_s3_client.list_objects_v2.return_value = {
"Contents": [{"Key": "test_prefix/test_object", "ETag": "test_etag"}]
}
m_s3_client.delete_objects.return_value = {}
m_s3_client.upload_file.return_value = None
prefix = "test_prefix"
base_path = "./" if os.getcwd().endswith("/tests") else "./tests/"
base_path = Path(base_path + "data/").resolve()
S3Sync(
m_s3_client,
"test_bucket",
prefix,
str(base_path / "lambda_build_with_submodules"),
)
m_s3_client.list_objects_v2.assert_called_once()
m_s3_client.delete_objects.assert_called_once()
m_s3_client.upload_file.assert_called()
| apache-2.0 | Python |
|
0f1cf524c2b90d77e17d516a30d62632ebb5ed2f | Add pipeline for untar'ing GCS blobs. | GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare | datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py | datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py | r"""Untar .tar and .tar.gz GCS files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from datathon_etl_pipelines.dofns.read_tar_file import ReadTarFile
from datathon_etl_pipelines.utils import get_setup_file
import tensorflow as tf
def write_file(element):
path, contents = element
with tf.io.gfile.GFile(path, 'wb') as fp:
fp.write(contents)
def main():
"""Build and execute the Apache Beam pipeline using the commandline arguments."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input_tars',
required=True,
nargs='+',
help="""One or more wildcard patterns that give the full paths to the
input tar files on GCS.""")
parser.add_argument(
'--output_dir',
required=True,
help="""The output directory to write the untar'd files to.""")
args, pipeline_args = parser.parse_known_args()
beam_options = PipelineOptions(pipeline_args)
# serialize and provide global imports, functions, etc. to workers.
beam_options.view_as(SetupOptions).save_main_session = True
beam_options.view_as(SetupOptions).setup_file = get_setup_file()
if args.output_dir.endswith('/'):
out_dir = args.output_dir[:-1]
else:
out_dir = args.output_dir
def get_full_output_path(relative_path):
if relative_path.startswith('/'):
return out_dir + relative_path
else:
return '{}/{}'.format(out_dir, relative_path)
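  # tf.io.gfile.glob expands the --input_tars wildcard patterns up front;
  # the resulting list of tar paths seeds the pipeline.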
with beam.Pipeline(options=beam_options) as p:
    _ = (
        p
        | beam.Create(tf.io.gfile.glob(args.input_tars))
        | 'Untar' >> beam.ParDo(ReadTarFile(), get_full_output_path)
        | 'Write' >> beam.Map(write_file))
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
0bf6f0b6021b2ca3801b0d68c0ee63e39ddc36df | Make a ValueBuffer class | google/CFU-Playground,google/CFU-Playground,google/CFU-Playground,google/CFU-Playground | proj/avg_pdti8/util.py | proj/avg_pdti8/util.py | #!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nmigen import Mux, Signal, signed
from nmigen_cfu import InstructionBase, SimpleElaboratable, TestBase, Cfu, CfuTestBase
from nmigen.sim import Delay, Settle
import unittest
class ValueBuffer(SimpleElaboratable):
"""Buffers a signal.
Parameters:
inp: A Signal
The signal to buffer
Interface:
capture: Signal()
Input.
When high, captures input while transparently placing on output.
When low, output is equal to last captured input.
output: Signal(like inp)
Output. The last captured input.
"""
def __init__(self, inp):
self.capture = Signal()
self.input = inp
self.output = Signal.like(inp)
def elab(self, m):
captured = Signal.like(self.input)
with m.If(self.capture):
m.d.sync += captured.eq(self.input)
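        # While capture is high the live input is passed straight through;
        # otherwise the registered copy holds the last captured value.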
m.d.comb += self.output.eq(Mux(self.capture, self.input, captured))
class ValueBufferTest(TestBase):
def create_dut(self):
self.in_signal = Signal(4)
return ValueBuffer(self.in_signal)
def test(self):
DATA = [
((0, 0), 0),
((1, 5), 5),
((0, 3), 5),
((0, 2), 5),
((0, 2), 5),
((1, 2), 2),
((0, 2), 2),
((0, 2), 2),
]
def process():
for n, ((capture, in_sig), expected_output) in enumerate(DATA):
yield self.in_signal.eq(in_sig)
yield self.dut.capture.eq(capture)
yield Settle()
self.assertEqual((yield self.dut.output), expected_output, f"cycle={n}")
yield
self.run_sim(process, True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
f2f4accf304cfe1aaed042f7df35bc0ee86a6c59 | Add enums for service/record/assignment/transaction type | otovo/python-netsgiro | netsgiro/enums.py | netsgiro/enums.py | from enum import IntEnum
class ServiceType(IntEnum):
NONE = 0
OCR_GIRO = 9
AVTALEGIRO = 21
class RecordType(IntEnum):
TRANSMISSION_START = 10
ASSIGNMENT_START = 20
TRANSACTION_AMOUNT_1 = 30
TRANSACTION_AMOUNT_2 = 31
TRANSACTION_AMOUNT_3 = 32 # Only for TransactionType 20 and 21
TRANSACTION_SPECIFICATION = 49
AGREEMENTS = 78 # TODO Better name?
ASSIGNMENT_END = 88
TRANSMISSION_END = 89
class AvtaleGiroAssignmentType(IntEnum):
PAYMENT_REQUEST = 0 # TODO Better name?
AGREEMENTS = 24 # TODO Better name?
CANCELATION = 36 # TODO Better name?
class AvtaleGiroTransactionType(IntEnum):
NO_NOTIFICATION_FROM_BANK = 2 # TODO Better name?
NOTIFICATION_FROM_BANK = 21 # TODO Better name?
CANCELATION = 93 # TODO Better name?
AGREEMENTS = 94 # TODO Better name?
class OcrGiroTransactionType(IntEnum):
FROM_GIRO_DEBITED_ACCOUNT = 10
FROM_STANDING_ORDERS = 11
FROM_DIRECT_REMITTANCE = 12
FROM_BUSINESS_TERMINAL_GIRO = 13
FROM_COUNTER_GIRO = 14
FROM_AVTALEGIRO = 15
FROM_TELEGIRO = 16
FROM_CASH_GIRO = 17
REVERSING_WITH_KID = 18
PURCHASE_WITH_KID = 19
REVERSING_WITH_TEXT = 20
PURCHASE_WITH_TEXT = 21
| apache-2.0 | Python |
|
ca16e36b79e9c7dcd5cb31d899ef9c50ebf602c1 | add unit test for _nearest_neighbor() | UDST/urbanaccess | urbanaccess/tests/test_network.py | urbanaccess/tests/test_network.py | import pytest
import pandas as pd
from urbanaccess import network
@pytest.fixture
def nearest_neighbor_dfs():
data = {
'id': (1, 2, 3),
'x': [-122.267546, -122.264479, -122.219119],
'y': [37.802919, 37.808042, 37.782288]
}
osm_nodes = pd.DataFrame(data).set_index('id')
data = {
'node_id_route': ['1_transit_a', '2_transit_a',
'3_transit_a', '4_transit_a'],
'x': [-122.265417, -122.266910, -122.269741, -122.238638],
'y': [37.806372, 37.802687, 37.799480, 37.797234]
}
transit_nodes = pd.DataFrame(data).set_index('node_id_route')
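    # Hand-computed nearest OSM node id expected for each transit node above.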
data = {'node_id_route': ['1_transit_a', '2_transit_a',
'3_transit_a', '4_transit_a'],
'nearest_osm_node': [2, 1, 1, 3]}
index = range(4)
expected_transit_nodes = pd.concat(
[transit_nodes, pd.DataFrame(data, index).set_index('node_id_route')],
axis=1)
return osm_nodes, transit_nodes, expected_transit_nodes
def test_nearest_neighbor(nearest_neighbor_dfs):
osm_nodes, transit_nodes, expected_transit_nodes = nearest_neighbor_dfs
transit_nodes['nearest_osm_node'] = network._nearest_neighbor(
osm_nodes[['x', 'y']],
transit_nodes[['x', 'y']])
assert expected_transit_nodes.equals(transit_nodes)
| agpl-3.0 | Python |
|
7c5dbbcd1de6376a025117fe8f00516f2fcbb40d | Add regressiontest for crypto_onetimeauth_verify | RaetProtocol/libnacl,johnttan/libnacl,saltstack/libnacl | tests/unit/test_auth_verify.py | tests/unit/test_auth_verify.py | # Import nacl libs
import libnacl
# Import python libs
import unittest
class TestAuthVerify(unittest.TestCase):
'''
    Test crypto_auth and crypto_onetimeauth verification functions
'''
def test_auth_verify(self):
msg = b'Anybody can invent a cryptosystem he cannot break himself. Except Bruce Schneier.'
key1 = libnacl.utils.rand_nonce()
key2 = libnacl.utils.rand_nonce()
sig1 = libnacl.crypto_auth(msg, key1)
sig2 = libnacl.crypto_auth(msg, key2)
self.assertTrue(libnacl.crypto_auth_verify(sig1, msg, key1))
with self.assertRaises(ValueError) as context:
libnacl.crypto_auth_verify(sig1, msg, key2)
        self.assertIn('Failed to auth msg', str(context.exception))
with self.assertRaises(ValueError) as context:
libnacl.crypto_auth_verify(sig2, msg, key1)
        self.assertIn('Failed to auth msg', str(context.exception))
self.assertTrue(libnacl.crypto_auth_verify(sig2, msg, key2))
    def test_onetimeauth_verify(self):
        '''
        Test onetimeauth functions
        '''
msg = b'Anybody can invent a cryptosystem he cannot break himself. Except Bruce Schneier.'
key1 = libnacl.utils.rand_nonce()
key2 = libnacl.utils.rand_nonce()
sig1 = libnacl.crypto_onetimeauth(msg, key1)
sig2 = libnacl.crypto_onetimeauth(msg, key2)
self.assertTrue(libnacl.crypto_onetimeauth_verify(sig1, msg, key1))
with self.assertRaises(ValueError) as context:
libnacl.crypto_onetimeauth_verify(sig1, msg, key2)
        self.assertIn('Failed to auth msg', str(context.exception))
with self.assertRaises(ValueError) as context:
libnacl.crypto_onetimeauth_verify(sig2, msg, key1)
        self.assertIn('Failed to auth msg', str(context.exception))
self.assertTrue(libnacl.crypto_onetimeauth_verify(sig2, msg, key2))
| apache-2.0 | Python |
|
80ccffb269b04af02224c1121c41d4e7c503bc30 | Add unit test for intersperse | brechtm/rinohtype,brechtm/rinohtype,brechtm/rinohtype | tests/util/test_intersperse.py | tests/util/test_intersperse.py | # This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from rinoh.util import intersperse
def test_intersperse():
separator = "."
letters = [127, 0, 0, 1]
localhost = list(intersperse(letters, separator))
assert [127, ".", 0, ".", 0, ".", 1] == localhost
| agpl-3.0 | Python |
|
8f18a1b75b68d8c97efd57673b160a9ceda608a3 | Add Manifest class | Bloomie/murano-repository,Bloomie/murano-repository,EkaterinaFedorova/murano-repository,EkaterinaFedorova/murano-repository | manifest.py | manifest.py | __author__ = 'fervent'
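# NOTE: only the author tag was committed here, although the commit subject
# mentions a Manifest class. A hypothetical minimal skeleton (an assumption,
# not part of the original commit) might look like:
#
# class Manifest(object):
#     def __init__(self, path):
#         self.path = path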
| apache-2.0 | Python |
|
e56d9337cc5c63ef61afe8ffdee2019e19af0963 | Add test for resolved issue 184 | armandobs14/rdflib,RDFLib/rdflib,yingerj/rdflib,yingerj/rdflib,avorio/rdflib,dbs/rdflib,avorio/rdflib,armandobs14/rdflib,dbs/rdflib,marma/rdflib,RDFLib/rdflib,avorio/rdflib,dbs/rdflib,ssssam/rdflib,yingerj/rdflib,RDFLib/rdflib,ssssam/rdflib,armandobs14/rdflib,dbs/rdflib,marma/rdflib,avorio/rdflib,yingerj/rdflib,ssssam/rdflib,marma/rdflib,RDFLib/rdflib,ssssam/rdflib,armandobs14/rdflib,marma/rdflib | test/test_issue184.py | test/test_issue184.py | from rdflib.term import Literal
from rdflib.term import URIRef
from rdflib.graph import ConjunctiveGraph
def test_escaping_of_triple_doublequotes():
"""
Issue 186 - Check escaping of multiple doublequotes.
A serialization/deserialization roundtrip of a certain class of
Literals fails when there are both, newline characters and multiple subsequent
quotation marks in the lexical form of the Literal. In this case invalid N3
is emitted by the serializer, which in turn cannot be parsed correctly.
"""
g=ConjunctiveGraph()
g.add((URIRef('http://foobar'), URIRef('http://fooprop'), Literal('abc\ndef"""""')))
# assert g.serialize(format='n3') == '@prefix ns1: <http:// .\n\nns1:foobar ns1:fooprop """abc\ndef\\"\\"\\"\\"\\"""" .\n\n'
g2=ConjunctiveGraph()
g2.parse(data=g.serialize(format='n3'), format='n3')
assert g.isomorphic(g2) is True | bsd-3-clause | Python |
|
0988a2a18688a8b8e07d94e1609405c17bbe717d | Add test suite for the playlist plugin | jackwilsdon/beets,jackwilsdon/beets,ibmibmibm/beets,SusannaMaria/beets,sampsyo/beets,beetbox/beets,ibmibmibm/beets,ibmibmibm/beets,shamangeorge/beets,jackwilsdon/beets,ibmibmibm/beets,SusannaMaria/beets,beetbox/beets,shamangeorge/beets,SusannaMaria/beets,shamangeorge/beets,shamangeorge/beets,jackwilsdon/beets,beetbox/beets,beetbox/beets,sampsyo/beets,SusannaMaria/beets,sampsyo/beets,sampsyo/beets | test/test_playlist.py | test/test_playlist.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import os
import tempfile
import unittest
from test import _common
from test import helper
import beets
class PlaylistTest(unittest.TestCase, helper.TestHelper):
def setUp(self):
self.setup_beets()
self.lib = beets.library.Library(':memory:')
i1 = _common.item()
i1.path = beets.util.normpath('/a/b/c.mp3')
i1.title = u'some item'
i1.album = u'some album'
self.lib.add(i1)
self.lib.add_album([i1])
i2 = _common.item()
i2.path = beets.util.normpath('/d/e/f.mp3')
i2.title = 'another item'
i2.album = 'another album'
self.lib.add(i2)
self.lib.add_album([i2])
i3 = _common.item()
i3.path = beets.util.normpath('/x/y/z.mp3')
i3.title = 'yet another item'
i3.album = 'yet another album'
self.lib.add(i3)
self.lib.add_album([i3])
self.playlist_dir = tempfile.TemporaryDirectory()
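        # Write an m3u playlist referencing the first two items by their
        # absolute library paths.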
with open(os.path.join(self.playlist_dir.name, 'test.m3u'), 'w') as f:
f.write('{0}\n'.format(beets.util.displayable_path(i1.path)))
f.write('{0}\n'.format(beets.util.displayable_path(i2.path)))
self.config['directory'] = '/'
self.config['playlist']['relative_to'] = 'library'
self.config['playlist']['playlist_dir'] = self.playlist_dir.name
self.load_plugins('playlist')
def tearDown(self):
self.unload_plugins()
self.playlist_dir.cleanup()
self.teardown_beets()
def test_query_name(self):
q = u'playlist:test'
results = self.lib.items(q)
self.assertEqual(set([i.title for i in results]), set([
u'some item',
u'another item',
]))
def test_query_path(self):
q = u'playlist:{0}/test.m3u'.format(self.playlist_dir.name)
results = self.lib.items(q)
self.assertEqual(set([i.title for i in results]), set([
u'some item',
u'another item',
]))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit | Python |
|
71577ec62406c0119ea2282a3011ebbc368a3a04 | add test_pollbot.py | bmintz/reactor-bot | tests/test_pollbot.py | tests/test_pollbot.py | #!/usr/bin/env python3
import pytest
import poll_bot
class TestPollBot:
def test_extract_emoji(self):
lines_and_emojis = {
' M)-ystery meat': 'M',
'๐ dog sandwiches': '๐',
'3 blind mice': '3',
'๐บ๐ธ flags': '๐บ๐ธ',
'<:python3:232720527448342530> python3!': '<:python3:232720527448342530>',
}
        for line, emoji in lines_and_emojis.items():
            assert poll_bot.extract_emoji(line) == emoji
|
1e9980aff2370b96171011f7fa50d4517957fa86 | Add a script to check TOI coverage for a bbox and zoom range | tilezen/tilepacks | tilepack/check_toi.py | tilepack/check_toi.py | import mercantile
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('min_lon',
type=float,
help='Bounding box minimum longitude/left')
parser.add_argument('min_lat',
type=float,
help='Bounding box minimum latitude/bottom')
parser.add_argument('max_lon',
type=float,
help='Bounding box maximum longitude/right')
parser.add_argument('max_lat',
type=float,
help='Bounding box maximum latitude/top')
parser.add_argument('min_zoom',
type=int,
help='The minimum zoom level to include')
parser.add_argument('max_zoom',
type=int,
help='The maximum zoom level to include')
args = parser.parse_args()
print("zoom\tmissing from toi\tin aoi")
for zoom in range(args.min_zoom, args.max_zoom + 1):
tiles_in_aoi = set([
'{}/{}/{}'.format(z, x, y)
for x, y, z in mercantile.tiles(
args.min_lon, args.min_lat, args.max_lon, args.max_lat,
[zoom]
)
])
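        # toi.z<zoom>.txt is expected in the current directory, one tile per
        # line in "z/x/y" form (the same format as the AOI set built above).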
with open('toi.z{}.txt'.format(zoom), 'r') as f:
tiles_in_toi = set([
l.strip()
for l in f.readlines()
])
print("{zoom:2d}\t{tiles_not_in_toi}\t{tiles_in_aoi}".format(
zoom=zoom,
tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi),
tiles_in_aoi=len(tiles_in_aoi),
))
if __name__ == '__main__':
main()
| mit | Python |