commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
c8c679221e0a36ac6074c0869bfc4b75d9745ae2 | Create a.py | y-sira/atcoder,y-sira/atcoder | abc066/a.py | abc066/a.py | a, b, c = map(int, input().split())
print(min(a + b, b + c, a + c))
| mit | Python |
|
365152787cae36c12691e4da52a0575bd56d7d1b | Add tests for tril, triu and find | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | tests/cupyx_tests/scipy_tests/sparse_tests/test_extract.py | tests/cupyx_tests/scipy_tests/sparse_tests/test_extract.py | import unittest
import numpy
try:
import scipy.sparse
scipy_available = True
except ImportError:
scipy_available = False
import cupy
from cupy import testing
from cupyx.scipy import sparse
@testing.parameterize(*testing.product({
'shape': [(8, 3), (4, 4), (3, 8)],
'a_format': ['dense', 'csr', 'csc', 'coo'],
'out_format': [None, 'csr', 'csc'],
}))
@unittest.skipUnless(scipy_available, 'requires scipy')
class TestExtract(unittest.TestCase):
density = 0.75
def _make_matrix(self, dtype):
a = testing.shaped_random(self.shape, numpy, dtype=dtype)
a[a > self.density] = 0
b = cupy.array(a)
if self.a_format == 'csr':
a = scipy.sparse.csr_matrix(a)
b = sparse.csr_matrix(b)
elif self.a_format == 'csc':
a = scipy.sparse.csc_matrix(a)
b = sparse.csc_matrix(b)
elif self.a_format == 'coo':
a = scipy.sparse.coo_matrix(a)
b = sparse.coo_matrix(b)
return a, b
@testing.for_dtypes('fdFD')
def test_tril(self, dtype):
np_a, cp_a = self._make_matrix(dtype)
m, n = self.shape
for k in range(-m+1, n):
np_out = scipy.sparse.tril(np_a, k=k, format=self.out_format)
cp_out = sparse.tril(cp_a, k=k, format=self.out_format)
assert np_out.format == cp_out.format
assert np_out.nnz == cp_out.nnz
cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())
@testing.for_dtypes('fdFD')
def test_triu(self, dtype):
np_a, cp_a = self._make_matrix(dtype)
m, n = self.shape
for k in range(-m+1, n):
np_out = scipy.sparse.triu(np_a, k=k, format=self.out_format)
cp_out = sparse.triu(cp_a, k=k, format=self.out_format)
assert np_out.format == cp_out.format
assert np_out.nnz == cp_out.nnz
cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())
@testing.for_dtypes('fdFD')
def test_find(self, dtype):
if self.out_format is not None:
            raise unittest.SkipTest()
np_a, cp_a = self._make_matrix(dtype)
np_row, np_col, np_data = scipy.sparse.find(np_a)
cp_row, cp_col, cp_data = sparse.find(cp_a)
# Note: Check the results by reconstructing the sparse matrix from the
# results of find, as SciPy and CuPy differ in the data order.
np_out = scipy.sparse.coo_matrix((np_data, (np_row, np_col)),
shape=self.shape)
cp_out = sparse.coo_matrix((cp_data, (cp_row, cp_col)),
shape=self.shape)
cupy.testing.assert_allclose(np_out.todense(), cp_out.todense())
| mit | Python |
|
a4d3056bbbe71d73d901c13927264157c9c51842 | Add lc004_median_of_two_sorted_arrays.py | bowen0701/algorithms_data_structures | lc004_median_of_two_sorted_arrays.py | lc004_median_of_two_sorted_arrays.py | """Leetcode 4. Median of Two Sorted Arrays
Hard
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays.
The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
8a836213f7466de51c6d3d18d1a5ba74bb28de4a | Add hdf5-vol-async package. (#26874) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/hdf5-vol-async/package.py | var/spack/repos/builtin/packages/hdf5-vol-async/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hdf5VolAsync(CMakePackage):
"""This package enables asynchronous IO in HDF5."""
homepage = "https://sdm.lbl.gov/"
git = "https://github.com/hpc-io/vol-async"
maintainers = ['hyoklee']
version('v1.0')
depends_on('argobots@main')
depends_on('hdf5@develop-1.13+mpi+threadsafe')
def cmake_args(self):
"""Populate cmake arguments for HDF5 VOL."""
args = [
            self.define('BUILD_SHARED_LIBS', True),
            self.define('BUILD_TESTING', self.run_tests)
]
return args
| lgpl-2.1 | Python |
|
034fe49d29f229e8fafc6b1034fc2685cd896eb2 | Create create-studio-item | Xi-Plus/Xiplus-Wikipedia-Bot,Xi-Plus/Xiplus-Wikipedia-Bot | my-ACG/create-studio-item/edit.py | my-ACG/create-studio-item/edit.py | # -*- coding: utf-8 -*-
import argparse
import csv
import os
import re
import urllib.parse
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
site = pywikibot.Site()
site.login()
datasite = site.data_repository()
def main(studio):
data = {
'labels': {
'zh-tw': {
'language': 'zh-tw',
'value': studio
},
},
'sitelinks': {
'zhwiki': {
'site': 'zhwiki',
'title': studio,
'badges': [],
},
},
# https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON#Snaks
'claims': {
'P3': [{
'mainsnak': {
'snaktype': 'value',
'property': 'P3',
'datatype': 'wikibase-item',
'datavalue': {
'value': {
'entity-type': 'item',
'numeric-id': 65,
},
'type': 'wikibase-entityid',
},
},
'type': 'statement',
'rank': 'normal',
}],
},
}
# claim = pywikibot.page.Claim(datasite, 'P25', datatype='wikibase-item')
# item.editEntity({'claims': [claim.toJSON()]})
print(data)
    item = datasite.editEntity({}, data, summary=u'Create new item and link')
print(item['entity']['id'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('studio')
args = parser.parse_args()
main(args.studio)
| mit | Python |
|
9b572d4f53b23f3dc51dbfb98d46d0daa68d3569 | fix pep8 on core admin profile | YACOWS/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,opps/opps,opps/opps,YACOWS/opps,opps/opps,jeanmask/opps,YACOWS/opps,opps/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,williamroot/opps,williamroot/opps | opps/core/admin/profile.py | opps/core/admin/profile.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from opps.core.models import Profile
class ProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(Profile, ProfileAdmin)
| # -*- coding: utf-8 -*-
from django.contrib import admin
from opps.core.models import Profile
class ProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(Profile, ProfileAdmin)
| mit | Python |
419e001591566df909b03ffd0abff12171b62491 | Create binary_search_iter.py | lironhp/animated-octo-doodle | binary_search_iter.py | binary_search_iter.py | #GLOBALS
#=======
FIRST_IDX = 0
def chop(number, int_list):
    list_size = len(int_list)
start_idx = FIRST_IDX
end_idx = list_size-1
current_idx = end_idx/2
itr_counter = list_size
while itr_counter>0:
current_value = int_list[current_idx]
if current_value == number:
return current_idx
        elif current_value > number:
            end_idx = current_idx - 1
        elif current_value < number:
start_idx = current_idx+1
current_idx = (end_idx + start_idx)/2
        itr_counter /= 2
    return -1  # target not found
if __name__=="__main__":
    # simple demo so the script is runnable
    print(chop(5, [1, 3, 5, 7, 9]))  # expected index: 2
| mit | Python |
|
6c4c26f5383740257b8bca56ce1ea9011053aff6 | add new package : keepalived (#14463) | LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/keepalived/package.py | var/spack/repos/builtin/packages/keepalived/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Keepalived(AutotoolsPackage):
"""
Keepalived implements a set of checkers to dynamically and adaptively
maintain and manage loadbalanced server pool according their health
"""
homepage = "http://www.keepalived.org"
url = "http://www.keepalived.org/software/keepalived-1.2.0.tar.gz"
version('2.0.19', sha256='0e2f8454765bc6a5fa26758bd9cec18aae42882843cdd24848aff0ae65ce4ca7')
version('2.0.18', sha256='1423a2b1b8e541211029b9e1e1452e683bbe5f4b0b287eddd609aaf5ff024fd0')
version('2.0.17', sha256='8965ffa2ffe243014f9c0245daa65f00a9930cf746edf33525d28a86f97497b4')
version('2.0.16', sha256='f0c7dc86147a286913c1c2c918f557735016285d25779d4d2fce5732fcb888df')
version('2.0.15', sha256='933ee01bc6346aa573453b998f87510d3cce4aba4537c9642b24e6dbfba5c6f4')
version('2.0.14', sha256='1bf586e56ee38b47b82f2a27b27e04d0e5b23f1810db6a8e801bde9d3eb8617b')
version('2.0.13', sha256='c7fb38e8a322fb898fb9f6d5d566827a30aa5a4cd1774f474bb4041c85bcbc46')
version('2.0.12', sha256='fd50e433d784cfd948de5726752cf89ab7001f587fe10a5110c6c7cbda4b7b5e')
version('2.0.11', sha256='a298b0c02a20959cfc365b62c14f45abd50d5e0595b2869f5bce10ec2392fa48')
depends_on('openssl', type='build')
| lgpl-2.1 | Python |
|
5ef097bc394ef5be9b723ca0732bb842ab82e9e1 | Include app.wsgi into repository as an example #8 | reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations | website/app.wsgi | website/app.wsgi | import sys
from pathlib import Path
path = Path(__file__)
# when moving the virtual environment, update the following line
venv_location = str(path.parents[2])
# in Python3 there is no builtin execfile shortcut - let's define one
def execfile(filename):
globals = dict( __file__ = filename)
exec(open(filename).read(), globals)
# add application directory to execution path
sys.path.insert(0, str(path.parent))
sys.path.insert(0, str(path.parents[1]))
# activate virtual environment
activate_this = venv_location + '/virtual_environment/bin/activate_this.py'
execfile(activate_this)
# import application to serve
from app import app as application
| lgpl-2.1 | Python |
|
fb0f5340d9dcd28725f43dc3b7f93def78bdab92 | Add serialization tests for TMRegion | rcrowder/nupic,lscheinkman/nupic,vitaly-krugl/nupic,rhyolight/nupic,rcrowder/nupic,rhyolight/nupic,vitaly-krugl/nupic,rcrowder/nupic,vitaly-krugl/nupic,alfonsokim/nupic,neuroidss/nupic,ywcui1990/nupic,alfonsokim/nupic,scottpurdy/nupic,neuroidss/nupic,neuroidss/nupic,alfonsokim/nupic,ywcui1990/nupic,scottpurdy/nupic,subutai/nupic,scottpurdy/nupic,rhyolight/nupic,subutai/nupic,numenta/nupic,numenta/nupic,lscheinkman/nupic,subutai/nupic,lscheinkman/nupic,numenta/nupic,ywcui1990/nupic | tests/unit/nupic/regions/tm_region_test.py | tests/unit/nupic/regions/tm_region_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""TMRegion unit tests."""
import tempfile
import unittest
import numpy as np
from nupic.regions.tm_region import TMRegion
from nupic.regions.tm_region_capnp import TMRegionProto
class TMRegionTest(unittest.TestCase):
def checkTMRegionImpl(self, impl):
output1 = {
"bottomUpOut": np.zeros((40,)),
"topDownOut": np.zeros((10,)),
"activeCells": np.zeros((40,)),
"predictedActiveCells": np.zeros((40,)),
"anomalyScore": np.zeros((1,)),
"lrnActiveStateT": np.zeros((40,)),
}
output2 = {
"bottomUpOut": np.zeros((40,)),
"topDownOut": np.zeros((10,)),
"activeCells": np.zeros((40,)),
"predictedActiveCells": np.zeros((40,)),
"anomalyScore": np.zeros((1,)),
"lrnActiveStateT": np.zeros((40,)),
}
a = np.zeros(10, dtype="int32")
a[[1, 3, 7]] = 1
b = np.zeros(10, dtype="int32")
b[[2, 4, 8]] = 1
inputA = {
"bottomUpIn": a,
"resetIn": np.zeros(1),
"sequenceIdIn": np.zeros(1),
}
inputB = {
"bottomUpIn": b,
"resetIn": np.zeros(1),
"sequenceIdIn": np.zeros(1),
}
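    # Round-trip check: run one compute step, serialize region1 via Cap'n
    # Proto, deserialize into region2, then feed both regions the same next
    # input and require identical outputs.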
region1 = TMRegion(10, 10, 4, temporalImp=impl)
region1.initialize()
region1.compute(inputA, output1)
proto1 = TMRegionProto.new_message()
region1.writeToProto(proto1)
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = TMRegionProto.read(f)
region2 = TMRegion.readFromProto(proto2)
region1.compute(inputB, output1)
region2.compute(inputB, output2)
self.assertTrue(np.array_equal(output1["bottomUpOut"],
output2["bottomUpOut"]))
self.assertTrue(np.array_equal(output1["topDownOut"],
output2["topDownOut"]))
self.assertTrue(np.array_equal(output1["activeCells"],
output2["activeCells"]))
self.assertTrue(np.array_equal(output1["predictedActiveCells"],
output2["predictedActiveCells"]))
self.assertTrue(np.array_equal(output1["anomalyScore"],
output2["anomalyScore"]))
self.assertTrue(np.array_equal(output1["lrnActiveStateT"],
output2["lrnActiveStateT"]))
def testWriteReadPy(self):
self.checkTMRegionImpl("py")
def testWriteReadCpp(self):
self.checkTMRegionImpl("cpp")
def testWriteReadTMPy(self):
self.checkTMRegionImpl("tm_py")
def testWriteReadTMCpp(self):
self.checkTMRegionImpl("tm_cpp")
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | Python |
|
8d6676f2e19ab9df01c681b6590c6f4adb0f938c | add profile model | ben-cunningham/pybot,ben-cunningham/python-messenger-bot | fbmsgbot/models/profile.py | fbmsgbot/models/profile.py | class Profile():
def __init__(self, **kwargs):
self.first_name = kwargs['first_name']
self.last_name = kwargs['last_name']
self.profile_pic = kwargs['profile_pic']
self.locale = kwargs['locale']
self.timezone = kwargs['timezone']
self.gender = kwargs['gender']
| mit | Python |
|
519b141349b4d39902416be560b989160d48b141 | add echo_delay to estimate the delay between two wav files | xiongyihui/tdoa,xiongyihui/tdoa | echo_delay.py | echo_delay.py |
import sys
import wave
import numpy as np
from gcc_phat import gcc_phat
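# For reference, a minimal GCC-PHAT sketch; this assumes the imported
# gcc_phat module follows the standard formulation (the import above is what
# is actually used below). PHAT weighting divides the cross-power spectrum
# by its magnitude so only phase remains, which sharpens the correlation
# peak that marks the delay.
def gcc_phat_sketch(sig, ref, fs=1, max_tau=None, interp=16):
    n = sig.shape[0] + ref.shape[0]
    SIG = np.fft.rfft(sig, n=n)
    REF = np.fft.rfft(ref, n=n)
    # cross-power spectrum, whitened to unit magnitude (PHAT)
    R = SIG * np.conj(REF)
    cc = np.fft.irfft(R / np.abs(R), n=interp * n)
    max_shift = int(interp * n / 2)
    if max_tau:
        max_shift = min(int(interp * fs * max_tau), max_shift)
    # wrap negative lags to the front and pick the strongest peak
    cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))
    shift = np.argmax(np.abs(cc)) - max_shift
    return shift / float(interp * fs)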
if len(sys.argv) != 3:
print('Usage: {} near.wav far.wav'.format(sys.argv[0]))
sys.exit(1)
near = wave.open(sys.argv[1], 'rb')
far = wave.open(sys.argv[2], 'rb')
rate = near.getframerate()
N = rate
window = np.hanning(N)
while True:
sig = near.readframes(N)
if len(sig) != 2 * N:
break
ref = far.readframes(N)
sig_buf = np.fromstring(sig, dtype='int16')
ref_buf = np.fromstring(ref, dtype='int16')
tau = gcc_phat(sig_buf * window, ref_buf * window, fs=rate, max_tau=1)
# tau = gcc_phat(sig_buf, ref_buf, fs=rate, max_tau=1)
print(tau * 1000)
| apache-2.0 | Python |
|
c71924d4baea473a36f0c22f0878fea7a9ff2800 | Create constants.py | a2monkey/boatbot | a2/constants.py | a2/constants.py | import re
import time
#first link to view the cruise
base_link = 'https://www.princess.com/find/cruiseDetails.do?voyageCode=2801'
#element to find
button_element = 'md-hidden'
#gets the current time
time = time.strftime('%I:%M:%S')
forming = 'building request'
separator = '─────────────────────────────────────────'
#xpath
# //*[contains(@class, 'col-pax-item selectable-blue-arrow col-xs-12 col-xs-pad-0 clearfix'), (@button, '')]
#//button[@data-num-pax="4"]/text()
#//button[@data-num-pax="4"]
# //*[contains(@class, 'col-pax-item selectable-blue-arrow col-xs-12 col-xs-pad-0 clearfix')//[contains(@button[contains(text(),'4')])]]
| mit | Python |
|
3dcd012977d4dfea69ec4a51650ac9a4fd375842 | add missing migration file | pythonkr/pyconapac-2016,pythonkr/pyconapac-2016,pythonkr/pyconapac-2016 | registration/migrations/0007_auto_20160416_1217.py | registration/migrations/0007_auto_20160416_1217.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-16 03:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0006_auto_20160416_1202'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_message',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| mit | Python |
|
64d675304b2d66d89e55dcff167d1dd20e6b000c | add fragment molecule class for monte carlo simulation | KEHANG/AutoFragmentModeling | afm/molecule.py | afm/molecule.py |
class FragmentMolecule(object):
def __init__(self, composition):
self.composition = composition
def __str__(self):
"""
Return a string representation.
"""
return self.composition
| mit | Python |
|
f3c2b9087a06b508a278cb8e6f79200caae1ac07 | Add a tool to encode udot instructions in asm code so we compile on any toolchain. | google/gemmlowp,google/gemmlowp,google/gemmlowp,google/gemmlowp | standalone/encode.py | standalone/encode.py | import sys
import re
def encode_udot_vector(line):
m = re.search(
r'\budot[ ]+v([0-9]+)[ ]*.[ ]*4s[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*16b[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*16b',
line)
if not m:
return 0, line
match = m.group(0)
accum = int(m.group(1))
lhs = int(m.group(2))
rhs = int(m.group(3))
assert accum >= 0 and accum <= 31
assert lhs >= 0 and lhs <= 31
assert rhs >= 0 and rhs <= 31
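    # Assumed AArch64 UDOT (vector) layout: base opcode 0x6e809400 with
    # Rd (accum) in bits [4:0], Rn (lhs) in [9:5] and Rm (rhs) in [20:16],
    # matching the shifts applied below.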
mcode = 0x6e809400 | (accum << 0) | (lhs << 5) | (rhs << 16)
return mcode, match
def encode_udot_element(line):
m = re.search(
r'\budot[ ]+v([0-9]+)[ ]*.[ ]*4s[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*16b[ ]*,[ ]*v([0-9]+)[ ]*.[ ]*4b[ ]*\[([0-9])\]',
line)
if not m:
return 0, line
match = m.group(0)
accum = int(m.group(1))
lhs = int(m.group(2))
rhs = int(m.group(3))
lanegroup = int(m.group(4))
assert accum >= 0 and accum <= 31
assert lhs >= 0 and lhs <= 31
assert rhs >= 0 and rhs <= 31
assert lanegroup >= 0 and lanegroup <= 3
l = 1 if lanegroup & 1 else 0
h = 1 if lanegroup & 2 else 0
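    # Assumed element-form layout: base opcode 0x6f80e000; the 4-byte lane
    # group is selected by the L bit (bit 21) and H bit (bit 11) computed
    # above, with the register fields in the same positions as the vector form.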
mcode = 0x6f80e000 | (accum << 0) | (lhs << 5) | (rhs << 16) | (l << 21) | (
h << 11)
return mcode, match
def encode(line):
mcode, match = encode_udot_vector(line)
if mcode:
return mcode, match
mcode, match = encode_udot_element(line)
if mcode:
return mcode, match
return 0, line
for line in sys.stdin:
mcode, match = encode(line)
if mcode:
line = line.replace(match, '.word 0x%x // %s' % (mcode, match))
sys.stdout.write(line)
| apache-2.0 | Python |
|
74c23aff06485f323c45b24e7e3784dd3c72d576 | Create dokEchoServer.py | Dokument/BM_Echo | dokEchoServer.py | dokEchoServer.py | #!/usr/bin/env python2.7
# Created by Adam Melton (.dok) referenceing https://bitmessage.org/wiki/API_Reference for API documentation
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This is an example of an echo server for PyBitmessage 0.3.0, by .dok (Version 0.2.1)
import ConfigParser
import xmlrpclib
import getopt
import json
import sys
import time
from time import strftime, gmtime
versionNo = 2.1
def logEcho(recTime,bmAddress):
global versionNo
config = ConfigParser.RawConfigParser()
echoLogFile = 'EchoLog.dat'
config.read(echoLogFile)
try: #try to open the file
config.get('EchoServer','processedTotal')
except:# if it fails, then initialize the EchoLog.dat file since this is the first time running the program
print 'Initializing EchoLog.dat'
config.add_section('EchoServer')
config.add_section('EchoLogs')
config.set('EchoServer','versionNumber',str(versionNo))
config.set('EchoServer','processedTotal','0')
processedTotal = int(config.get('EchoServer','processedTotal'))
processedTotal = processedTotal + 1
config.set('EchoServer','processedTotal',str(processedTotal)) #echo count
config.set('EchoLogs',recTime,bmAddress) #message information
with open(echoLogFile, 'wb') as configfile: #updates the total number of processed messages
config.write(configfile)
print 'Echo successfully logged.'
def processEcho():
api = xmlrpclib.ServerProxy("http://echo:echoPassword@localhost:8442/") #Connect to BitMessage using these api credentials
print 'Loaded from API successfully.'
timeStamp = gmtime() #set the timestamp before processing (to get the most accurate time)
inboxMessages = json.loads(api.getAllInboxMessages()) #Parse json data in to python data structure
print 'Loaded all inbox messages for processing.'
newestMessage = (len(inboxMessages['inboxMessages']) - 1) #Find the newest message (the only to echo)
replyAddress = inboxMessages['inboxMessages'][newestMessage]['fromAddress'] #Get the return address
myAddress = inboxMessages['inboxMessages'][newestMessage]['toAddress'] #Get my address
subject = 'ECHO'.encode('base64') #Set the subject
print 'Loaded and parsed data. Ready to build outgoing message.'
message = inboxMessages['inboxMessages'][newestMessage]['message'].decode('base64') #Gets the message sent by the user
if (len(message) > 256):
message = (message[:256] + '... Truncated to 256 characters.\n')
echoMessage = ('Message successfully received at ' + strftime("%Y-%m-%d %H:%M:%S",timeStamp) + ' UTC/GMT.\n' + '-------------------------------------------------------------------------------\n' + message + '\n\n\nThank you for using EchoServer. For More information about the BitMessage project, please visit https://BitMessage.org\n\nFeel free to contact me with questions or comments: BM-or9zPodxMUmkrmVVGCSV9xT1AibwdTFK9 \n.dok')
echoMessage = echoMessage.encode('base64') #Encode the message.
print 'Message built, ready to send. Sending...'
api.sendMessage(replyAddress,myAddress,subject,echoMessage) #build the message and send it
print 'Sent.'
print 'Begin logging echo.'
logEcho(strftime("%Y_%m_%d-%H_%M_%S",timeStamp), replyAddress) #Logs the echo to the EchoLog.dat file
if (newestMessage > 25): #Delete oldest message, trying to keep only 25 messages in the inbox. Only deletes one so it won't clear out your inbox (you can do that).
msgId = inboxMessages['inboxMessages'][0]['msgid'] #gets the message ID of the oldest message in the inbox (index 0)
api.trashMessage(msgId)
print 'Oldest message deleted.'
def main():
arg = sys.argv[1]
if arg == "startingUp":
sys.exit()
elif arg == "newMessage":
processEcho() #carries out the echo
print 'Done.'
sys.exit() #Done, exit
elif arg == "newBroadcast":
sys.exit()
else:
assert False, "unhandled option"
sys.exit() #Not a relevant argument, exit
if __name__ =="__main__":
main()
| mit | Python |
|
fb8b1d7cb6e98e97fb383ca7457cb1cd237f8184 | Add usernamer.py | SeattleCentral/ITC110 | examples/username.py | examples/username.py | # Madenning Username Generator
# Returns first char of first name and first 7 chars of last name
def usernamer(first_name, last_name):
username = first_name[0] + last_name[:7]
return username.lower()
if __name__ == '__main__':
# Testing
assert usernamer("Joshua", "Wedekind") == "jwedekin"
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
print(usernamer(first_name, last_name))
| mit | Python |
|
1417d5345d68ef67ba6e832bbc45b8f0ddd911bc | Create testTemplate.py | lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms | data_structures/linked_list/utils/testTemplate.py | data_structures/linked_list/utils/testTemplate.py | # A test template for Python solutions.
import sys
def TestMain(solution, log=sys.stdout, doNotLogPassed=True) -> bool:
    """
    @param solution: the function to be tested.
@param log: a stream or a file to log the tester output to.
@param doNotLogPassed: if True, all successful tests will not be logged.
@return: True if all tests in the TESTS array were successful, False otherwise.
All tester functions should follow the signature
of the TestMain function.
"""
    def TestPredefined(solution, log):
raise NotImplementedError()
# Please add all tester functions to the TESTS tuple.
TESTS = (TestPredefined, )
areAllPassed = True
for Test in TESTS:
if not Test(solution, log):
areAllPassed = False
return areAllPassed
| mit | Python |
|
3ea318cf5c1b66106bf496d513efdd6e86d0f665 | add vowpal_wabbit requirement installation | braincorp/robustus,braincorp/robustus | robustus/detail/install_vowpal_wabbit.py | robustus/detail/install_vowpal_wabbit.py | # =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import logging
import os
from requirement import RequirementException
from utility import unpack, safe_remove, run_shell, ln
import shutil
import subprocess
def install(robustus, requirement_specifier, rob_file, ignore_index):
cwd = os.getcwd()
os.chdir(robustus.cache)
install_dir = os.path.join(robustus.cache, 'vowpal_wabbit-%s' % requirement_specifier.version)
# try to download precompiled Vowpal Wabbit from the remote cache first
if not os.path.isdir(install_dir) and not ignore_index:
wabbit_archive = robustus.download_compiled_archive('vowpal_wabbit', requirement_specifier.version)
if wabbit_archive is not None:
unpack(wabbit_archive)
logging.info('Initializing compiled vowpal_wabbit')
# install into wheelhouse
if not os.path.exists(install_dir):
raise RequirementException("Failed to unpack precompiled vowpal_wabbit archive")
if not os.path.isdir(install_dir) and not ignore_index:
archive_name = '%s.tar.gz' % requirement_specifier.version # e.g. "7.7.tar.gz"
if os.path.exists(archive_name):
safe_remove(archive_name)
# move sources to a folder in order to use a clean name for installation
src_dir = 'vowpal_wabbit-%s' % requirement_specifier.version
if os.path.exists(src_dir):
safe_remove(src_dir)
run_shell(['wget', 'https://github.com/JohnLangford/vowpal_wabbit/archive/%s' % (archive_name,)],
verbose=robustus.settings['verbosity'] >= 1)
run_shell(['tar', 'zxvf', archive_name],
verbose=robustus.settings['verbosity'] >= 1)
if os.path.exists(src_dir+'_src'):
safe_remove(src_dir+'_src')
shutil.move(src_dir, src_dir+'_src')
src_dir += '_src'
os.chdir(src_dir)
if os.path.exists(install_dir):
safe_remove(install_dir)
os.mkdir(install_dir)
retcode = run_shell(['make'], verbose=robustus.settings['verbosity'] >= 1)
if retcode:
raise RequirementException('Failed to compile Vowpal Wabbit')
retcode = run_shell('make install', shell=True)
if retcode:
raise RequirementException('Failed install Vowpal Wabbit')
os.chdir(robustus.cache)
shutil.rmtree(src_dir)
venv_install_folder = os.path.join(robustus.env, 'vowpal_wabbit')
if os.path.exists(venv_install_folder):
safe_remove(venv_install_folder)
shutil.copytree(install_dir, venv_install_folder)
executable_path = os.path.join(install_dir, 'bin', 'vw')
ln(executable_path, os.path.join(robustus.env, 'bin', 'vw'), force=True)
os.chdir(cwd)
# now install python part
robustus.install_through_wheeling(requirement_specifier, rob_file, ignore_index)
| mit | Python |
|
30567284410b9bb7154b8d39e5dfe7bc4bb1b269 | Add migration for on_delete SET_NULL | worthwhile/django-herald,jproffitt/django-herald,jproffitt/django-herald,worthwhile/django-herald | herald/migrations/0006_auto_20170825_1813.py | herald/migrations/0006_auto_20170825_1813.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-08-25 23:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('herald', '0005_merge_20170407_1316'),
]
operations = [
migrations.AlterField(
model_name='sentnotification',
name='user',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| mit | Python |
|
05ecbd6ea7692ac85a96b35a39ca4609f0a88d86 | Create gapminder_data_analysis.py | duttashi/Data-Analysis-Visualization | gapminder_data_analysis.py | gapminder_data_analysis.py | # Importing the required libraries
# Note %matplotlib inline works only for ipython notebook. It will not work for PyCharm. It is used to show the plot distributions
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
# Reading the data; low_memory=False makes pandas read the file in a single pass, avoiding mixed-dtype guessing
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
# shows the number of rows and columns
print (len(data))
print (len(data.columns))
print (len(data.index))
# Print the column headers/headings
names=data.columns.values
print names
# using the describe function to get the standard deviation and other descriptive statistics of our variables
desc1=data['breastcancerper100th'].describe()
desc2=data['femaleemployrate'].describe()
desc3=data['alcconsumption'].describe()
print "\nBreast Cancer per 100th person\n", desc1
print "\nfemale employ rate\n", desc2
print "\nAlcohol consumption in litres\n", desc3
data.describe()
# Show the frequency distribution
print "\nAlcohol Consumption\nFrequency Distribution (in %)"
c1=data['alcconsumption'].value_counts(sort=False,dropna=False)
print c1
print "\nBreast Cancer per 100th"
c2=data['breastcancerper100th'].value_counts(sort=False)
print c2
print "\nFemale Employee Rate"
c3=data['femaleemployrate'].value_counts(sort=False)
print c3
# Show the frequency distribution of the quantitative variable using the groupby function
ac1=data.groupby('alcconsumption').size()
print "ac1\n",ac1
# Creating a subset of the data
sub1=data[(data['femaleemployrate']>40) & (data['alcconsumption']>=20)& (data['breastcancerper100th']<50)]
# creating a copy of the subset. This copy will be used for subsequent analysis
sub2=sub1.copy()
print "\nContries where Female Employee Rate is greater than 40 &" \
" Alcohol Consumption is greater than 20L & new breast cancer cases reported are less than 50\n"
print sub2
print "\nContries where Female Employee Rate is greater than 50 &" \
" Alcohol Consumption is greater than 10L & new breast cancer cases reported are greater than 70\n"
sub3=data[(data['alcconsumption']>10)&(data['breastcancerper100th']>70)&(data['femaleemployrate']>50)]
print sub3
# Checking for missing values in the data row-wise
print "Missing data rows count: ",sum([True for idx,row in data.iterrows() if any(row.isnull())])
# Checking for missing values in the data column-wise
print "Showing missing data coulmn-wise"
print data.isnull().sum()
# Create a copy of the original dataset as sub4 by using the copy() method
sub4=data.copy()
# Now showing the count of null values in the variables
print sub4.isnull().sum()
# Since the data is all continuous variables, use the mean() for missing value imputation;
# if dealing with categorical data, then use the mode() for missing value imputation
sub4['breastcancerper100th'].fillna(sub4['breastcancerper100th'].mean(), inplace=True)
sub4['femaleemployrate'].fillna(sub4['femaleemployrate'].mean(), inplace=True)
sub4['alcconsumption'].fillna(sub4['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
print sub4.isnull().sum()
# categorize quantitative variables based on customized splits using the qcut function
sub4['alco']=pd.qcut(sub4.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub4['brst']=pd.qcut(sub4.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
sub4['emply']=pd.qcut(sub4.femaleemployrate,4,labels=["30-39","40-59","60-79","80-90"])
# Showing the frequency distribution of the categorised quantitative variables
print "Frequency distribution of the categorized quantitative variables\n"
fd1=sub4['alco'].value_counts(sort=False,dropna=False)
fd2=sub4['brst'].value_counts(sort=False,dropna=False)
fd3=sub4['emply'].value_counts(sort=False,dropna=False)
print "Alcohol Consumption\n",fd1
print "\n------------------------\n"
print "Breast Cancer per 100th\n",fd2
print "\n------------------------\n"
print "Female Employee Rate\n",fd3
print "\n------------------------\n"
# Now plotting the univariate quantitative variables using the distribution plot
sub5=sub4.copy()
sns.distplot(sub5['alcconsumption'].dropna(),kde=True)
plt.xlabel('Alcohol consumption in litres')
plt.title('Breast cancer in working class women')
plt.show() # Note: show() is not needed in an IPython notebook since %matplotlib inline handles display,
# but it is added here because %matplotlib inline does not work in an IDE like PyCharm, where plt.show() is required
sns.distplot(sub5['breastcancerper100th'].dropna(),kde=True)
plt.xlabel('Breast cancer per 100th women')
plt.title('Breast cancer in working class women')
plt.show()
sns.distplot(sub5['femaleemployrate'].dropna(),kde=True)
plt.xlabel('Female employee rate')
plt.title('Breast cancer in working class women')
plt.show()
# using scatter plots to visualize quantitative variables;
# if dealing with a categorical variable, use a histogram instead
scat1= sns.regplot(x='alcconsumption', y='breastcancerper100th', data=data)
plt.xlabel('Alcohol consumption in liters')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Alcohol Consumption and Breast Cancer 100th person')
scat2= sns.regplot(x='femaleemployrate', y='breastcancerper100th', data=data)
plt.xlabel('Female Employ Rate')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Female Employ Rate and Breast Cancer per 100th Rate')
| mit | Python |
|
1999295556ba404c7542d2001d7fdca80de54b5f | update api | danielecook/Genomic-API-lambda,danielecook/Genomic-API-lambda,danielecook/Genomic-API-lambda,danielecook/Genomic-API-lambda | functions/bcftools/main.py | functions/bcftools/main.py | """
Lambda example with external dependency
"""
import logging
from subprocess import Popen, PIPE
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def return_msg(out, err, status = 200):
return {
'statusCode': status,
'body': json.dumps({"out": out, "err": err}),
'headers': {
'Content-Type': 'application/json',
}
}
def handle(event, context):
logger.info("%s ------ %s", event, context)
if 'body' not in event:
return return_msg(None, "Error: must specify VCF and region", 400)
body = event['body']
if 'vcf' not in body:
return return_msg(None, "Error: must specify VCF and region", 400)
logger.info("%s", event['body'])
out, err = Popen(["./bcftools"], stdout = PIPE, stderr = PIPE).communicate()
logger.info(out + " out")
logger.info(err + " err")
return return_msg(out, err, 200)
| mit | Python |
|
f3c4bac262c6d09730b3f0c4a24639fde8b4d923 | Add wsgi compatible example gunicorn application | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | gunicorn-app.py | gunicorn-app.py | from __future__ import unicode_literals
import multiprocessing
import gunicorn.app.base
from gunicorn.six import iteritems
def number_of_workers():
return (multiprocessing.cpu_count() * 2) + 1
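# handler_app below is a plain WSGI callable: it receives the request environ
# and a start_response callback, reports status and headers through the
# callback, and returns an iterable of byte strings as the response body.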
def handler_app(environ, start_response):
response_body = b'Works fine'
status = '200 OK'
response_headers = [
('Content-Type', 'text/plain'),
]
start_response(status, response_headers)
return [response_body]
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
return self.application
if __name__ == '__main__':
options = {
'bind': '%s:%s' % ('127.0.0.1', '8080'),
'workers': number_of_workers(),
}
StandaloneApplication(handler_app, options).run()
| mit | Python |
|
8d8522c95492f034db2a43e95a6c9cd3fb60c798 | Create glove2word2vec.py | manasRK/glove-gensim | glove2word2vec.py | glove2word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Manas Ranjan Kar <manasrkar91@gmail.com>
# Licensed under the MIT License https://opensource.org/licenses/MIT
"""
CLI USAGE: python glove2word2vec.py <GloVe vector file> <Output model file>
Convert GloVe vectors into Gensim compatible format to instantiate from an existing file on disk in the word2vec C format;
model = gensim.models.Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
word2vec embeddings start with a header line giving the number of vectors (the vocabulary size) and their dimensionality. This allows gensim to allocate
memory accordingly for querying the model; larger dimensions mean more memory is held. Accordingly, this header line has to be inserted into the GloVe
embeddings file.
"""
import re
import sys
import gensim
import smart_open
def glove2word2vec(glove_vector_file,output_model_file):
def get_info(glove_file_name):
"""
Function to calculate the number of lines and dimensions of the GloVe vectors to make it Gensim compatible
"""
        num_lines = sum(1 for line in smart_open.smart_open(glove_file_name))
        if 'twitter' in glove_file_name:
            dims = re.findall('\d+', glove_file_name.split('.')[3])
            dims = ''.join(dims)
        else:
            dims = re.findall('\d+', glove_file_name.split('.')[2])
            dims = ''.join(dims)
return num_lines,dims
def prepend_line(infile, outfile, line):
"""
Function to prepend lines using smart_open
"""
with smart_open.smart_open(infile, 'rb') as old:
with smart_open.smart_open(outfile, 'wb') as new:
new.write(str(line) + "\n")
for line in old:
new.write(line)
return outfile
num_lines,dims=get_info(glove_vector_file)
gensim_first_line = "{} {}".format(num_lines, dims)
print '%s lines with %s dimensions' %(num_lines,dims)
model_file=prepend_line(glove_vector_file,output_model_file,gensim_first_line)
# Demo: Loads the newly created glove_model.txt into gensim API.
model=gensim.models.Word2Vec.load_word2vec_format(model_file,binary=False) #GloVe Model
print 'Most similar to king are: ', model.most_similar(positive=['king'], topn=10)
print 'Similarity score between woman and man is: ', model.similarity('woman', 'man')
print 'Model %s successfully created !!'%output_model_file
return model_file
if __name__ == "__main__":
glove_vector_file=sys.argv[1]
output_model_file=sys.argv[2]
glove2word2vec(glove_vector_file,output_model_file)
| mit | Python |
|
c718cf1d483b2570b886269cf990458b195500b5 | Remove Access-Control-Allow-Origin after all | gratipay/gratipay.com,mccolgst/www.gittip.com,studio666/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,mccolgst/www.gittip.com,studio666/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com | gratipay/utils/cache_static.py | gratipay/utils/cache_static.py | """
Handles caching of static resources.
"""
from base64 import b64encode
from hashlib import md5
from aspen import Response
ETAGS = {}
def asset_etag(path):
if path.endswith('.spt'):
return ''
if path in ETAGS:
h = ETAGS[path]
else:
with open(path) as f:
h = ETAGS[path] = b64encode(md5(f.read()).digest(), '-_').replace('=', '~')
return h
# algorithm functions
def get_etag_for_file(dispatch_result):
return {'etag': asset_etag(dispatch_result.match)}
def try_to_serve_304(website, dispatch_result, request, etag):
"""Try to serve a 304 for static resources.
"""
if not etag:
# This is a request for a dynamic resource.
return
qs_etag = request.line.uri.querystring.get('etag')
if qs_etag and qs_etag != etag:
# Don't serve one version of a file as if it were another.
raise Response(410)
headers_etag = request.headers.get('If-None-Match')
if not headers_etag:
# This client doesn't want a 304.
return
if headers_etag != etag:
# Cache miss, the client sent an old or invalid etag.
return
# Huzzah!
# =======
# We can serve a 304! :D
raise Response(304)
def add_caching_to_response(website, response, request=None, etag=None):
"""Set caching headers for static resources.
"""
if etag is None:
return
assert request is not None # sanity check
if response.code not in (200, 304):
return
# https://developers.google.com/speed/docs/best-practices/caching
response.headers['Vary'] = 'accept-encoding'
response.headers['Etag'] = etag
if request.line.uri.querystring.get('etag'):
# We can cache "indefinitely" when the querystring contains the etag.
response.headers['Cache-Control'] = 'public, max-age=31536000'
else:
# Otherwise we cache for 5 seconds
response.headers['Cache-Control'] = 'public, max-age=5'
| """
Handles caching of static resources.
"""
from base64 import b64encode
from hashlib import md5
from aspen import Response
ETAGS = {}
def asset_etag(path):
if path.endswith('.spt'):
return ''
if path in ETAGS:
h = ETAGS[path]
else:
with open(path) as f:
h = ETAGS[path] = b64encode(md5(f.read()).digest(), '-_').replace('=', '~')
return h
# algorithm functions
def get_etag_for_file(dispatch_result):
return {'etag': asset_etag(dispatch_result.match)}
def try_to_serve_304(website, dispatch_result, request, etag):
"""Try to serve a 304 for static resources.
"""
if not etag:
# This is a request for a dynamic resource.
return
qs_etag = request.line.uri.querystring.get('etag')
if qs_etag and qs_etag != etag:
# Don't serve one version of a file as if it were another.
raise Response(410)
headers_etag = request.headers.get('If-None-Match')
if not headers_etag:
# This client doesn't want a 304.
return
if headers_etag != etag:
# Cache miss, the client sent an old or invalid etag.
return
# Huzzah!
# =======
# We can serve a 304! :D
raise Response(304)
def add_caching_to_response(website, response, request=None, etag=None):
"""Set caching headers for static resources.
"""
if etag is None:
return
assert request is not None # sanity check
if response.code not in (200, 304):
return
# https://developers.google.com/speed/docs/best-practices/caching
response.headers['Vary'] = 'accept-encoding'
response.headers['Etag'] = etag
if 'Access-Control-Allow-Origin' not in response.headers:
response.headers['Access-Control-Allow-Origin'] = 'https://gratipay.com'
if request.line.uri.querystring.get('etag'):
# We can cache "indefinitely" when the querystring contains the etag.
response.headers['Cache-Control'] = 'public, max-age=31536000'
else:
# Otherwise we cache for 5 seconds
response.headers['Cache-Control'] = 'public, max-age=5'
| mit | Python |
2f6bfddbff166115e59db7763a62258a06b4e789 | Apply orphaned migration | barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api | project/apps/api/migrations/0010_remove_chart_song.py | project/apps/api/migrations/0010_remove_chart_song.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0009_auto_20150722_1041'),
]
operations = [
migrations.RemoveField(
model_name='chart',
name='song',
),
]
| bsd-2-clause | Python |
|
b3ab8fa855a08f0d63885b6df206715d1f36a817 | Add DNS-over-HTTPS example script | SpotlightKid/micropython-stm-lib | mrequests/examples/dns-over-https.py | mrequests/examples/dns-over-https.py | import mrequests
from urlencode import urlencode
DOH_IP = "1.1.1.1"
DOH_SERVER = b"cloudflare-dns.com"
DOH_PATH = "/dns-query"
def gethostbyname(name):
params = urlencode({
"name": name,
"type": "A"
})
headers = {
b"accept": b"application/dns-json",
b"user-agent": b"mrequests.py",
b"Host": DOH_SERVER
}
req = mrequests.get(
"https://{}{}?{}".format(DOH_IP, DOH_PATH, params),
headers=headers
)
# ~ print(req.status_code)
if req.status == 200:
reply = req.json()
else:
reply = {}
req.close()
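    # Expected reply shape (abridged; values are illustrative):
    # {"Status": 0, "Answer": [{"name": "httpbin.org.", "type": 1,
    #   "TTL": 300, "data": "3.210.94.60"}]}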
if reply.get("Status") == 0:
return [item["data"] for item in reply.get("Answer", [])]
if __name__ == '__main__':
import sys
#name = sys.argv[1]
name = "httpbin.org"
res = gethostbyname(name)
if res:
print(" ".join(res))
else:
print("Could not resolve host name '{}'.".format(name), file=sys.stderr)
| mit | Python |
|
4f765997c740f1f9b2dc985e7f3b0a467e8c311a | add code. | nag4/image_to_dir | image_to_yymmdd_dir_by_EXIF.py | image_to_yymmdd_dir_by_EXIF.py | # -*- coding: utf-8 -*-
from PIL import Image
import os
import shutil
user_name = os.getlogin()
# image/hoge.jpg, image/fuga.png, etc...
src_dir = "/Users/" + user_name + "/Desktop/image/"
# create dst_dir/yyyymmdd/
dst_dir = "/Users/" + user_name + "/Desktop/dst_dir/"
if os.path.exists(dst_dir) == False:
os.mkdir(dst_dir)
for root, dirs, files in os.walk(src_dir):
for filename in files:
try:
image_info = Image.open(src_dir + filename)
# 36867 : EXIF DateTimeOriginal
date = image_info._getexif()[36867]
yyyy, mm, dd = date[:4], date[5:7], date[8:10]
yyyymmdd_dir = os.path.join(dst_dir, yyyy + mm + dd)
if os.path.exists(yyyymmdd_dir) == False:
os.mkdir(yyyymmdd_dir)
dst = os.path.join(yyyymmdd_dir, filename)
if os.path.exists(dst) == False:
shutil.copy2(src_dir + filename, dst)
except Exception as e:
# .DS_Store must Die
print filename + ' is fail.'
| mit | Python |
|
920dbe007501ea99b95c41f94fb8f4a48c40717a | Add SensorsCollector, which collects data from libsensors via PySensors | szibis/Diamond,anandbhoraskar/Diamond,datafiniti/Diamond,Nihn/Diamond-1,jriguera/Diamond,bmhatfield/Diamond,eMerzh/Diamond-1,gg7/diamond,Slach/Diamond,joel-airspring/Diamond,disqus/Diamond,signalfx/Diamond,TinLe/Diamond,sebbrandt87/Diamond,Ensighten/Diamond,rtoma/Diamond,mfriedenhagen/Diamond,disqus/Diamond,EzyInsights/Diamond,jumping/Diamond,jriguera/Diamond,jumping/Diamond,jriguera/Diamond,mzupan/Diamond,Slach/Diamond,rtoma/Diamond,stuartbfox/Diamond,MediaMath/Diamond,ramjothikumar/Diamond,EzyInsights/Diamond,jumping/Diamond,tellapart/Diamond,python-diamond/Diamond,szibis/Diamond,socialwareinc/Diamond,actmd/Diamond,thardie/Diamond,thardie/Diamond,thardie/Diamond,mzupan/Diamond,MichaelDoyle/Diamond,Nihn/Diamond-1,Nihn/Diamond-1,Nihn/Diamond-1,python-diamond/Diamond,sebbrandt87/Diamond,Ensighten/Diamond,Slach/Diamond,rtoma/Diamond,registration,tuenti/Diamond,tusharmakkar08/Diamond,jriguera/Diamond,afm,mit,Python | src/collectors/SensorsCollector/SensorsCollector.py | src/collectors/SensorsCollector/SensorsCollector.py | import diamond.collector
import sensors
class SensorsCollector(diamond.collector.Collector):
"""
This class collects data from libsensors. It should work against libsensors 2.x and 3.x, pending
support within the PySensors Ctypes binding: http://pypi.python.org/pypi/PySensors/
Requires: 'sensors' to be installed, configured, and the relevant kernel modules to be loaded.
Requires: PySensors requires Python 2.6+
If you're having issues, check your version of 'sensors'. This collector written against:
sensors version 3.1.2 with libsensors version 3.1.2
"""
def get_default_config(self):
"""
Returns default collector settings.
"""
return {
'enabled': 'True',
'path': 'sensors',
'fahrenheit': 'True'
}
def collect(self):
sensors.init()
try:
for chip in sensors.iter_detected_chips():
for feature in chip:
self.publish(".".join([str(chip), feature.label]), feature.get_value())
finally:
sensors.cleanup()
| mit | Python |
|
68e43eafc1bb8e060ee105bcc9e3c354486dfcd2 | add unit tests for dataset.py | DucAnhPhi/LinguisticAnalysis | dataset_tests.py | dataset_tests.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 16:18:02 2017
Unit tests for dataset.py
@author: duc
"""
import unittest
import dataset as ds
import numpy as np
from flesch_kincaid import get_flesch_grade_level as lvl
from nltk.corpus import cmudict
pronDict = cmudict.dict()
class DatasetTests(unittest.TestCase):
def test_normalize(self):
m = np.array([[1, 4, 0.5, 9], [0, 2, 0.2, 2], [0, 1, 0.01, 8], [1, 2.5, 0.3, 3]])
norm = np.array([[1, 1, 1, 1], [0, 0.5, 0.4, 0.222], [0, 0.25, 0.02, 0.888], [1, 0.625, 0.6, 0.333]])
decimal = 3
np.testing.assert_array_almost_equal(ds.normalize(m), norm, decimal)
def test_combined_keywords(self):
t1 = [[["fake", "media"]], [["fake", "news"]]]
t2 = [[["women", "hillary"]], [["media", "trump"]]]
keywords = set(["fake", "media", "news", "women", "hillary", "trump"])
self.assertEqual(ds.get_combined_keywords(t1, t2), keywords)
def test_keywords_count(self):
t = [["make", "america", "great", "again"],["america", "was", "great"]]
dict = {"make": 0, "america": 0, "great": 0, "again": 0, "was": 0}
counted = {"make": 1, "america": 2, "great": 2, "again": 1, "was": 1}
self.assertEqual(ds.get_keywords_count(t, dict), counted)
def test_extract_features(self):
t = ["Make america great again!", "America was great! Hillary Clinton"]
norm = [[["make", "america", "great", "again"]],[["america", "was", "great"], ["hillary", "clinton"]]]
count = {"make": 0, "america": 0, "great": 0, "again": 0, "was": 0, "hillary": 0, "clinton": 0}
features = [
[4, 1, lvl(norm[0], pronDict), 1, 1, 1, 1, 0, 0, 0],
[2.5, 1, lvl(norm[1], pronDict), 0, 1, 1, 0, 1, 1, 1]
]
print(features)
self.assertEqual(ds.extract_features(t, norm, count, pronDict), features)
def test_positive_negative_amount(self):
m = [[0, 1, 0.5, 1, 0.02], [1, 1, 1, 0.3, 0.99], [1, 0, 0, 0, 0]]
n = np.array(m)
self.assertEqual(ds.get_positive_negative_amount(m), (2, 1))
self.assertEqual(ds.get_positive_negative_amount(n), (2, 1))
def test_training_set(self):
# should have 50% positive and 50% negative examples
ts = ds.divide_data_into_sets(ds.get_prepared_tweet_data("realDonaldTrump", "HillaryClinton"), 0.1, 0.1, 0.8)[2]
count = ds.get_positive_negative_amount(ts)
self.assertEqual(count[0], count[1])
if __name__ == '__main__':
unittest.main() | mit | Python |
|
66f32607d9d140be2a8e71270862074c53121a68 | Create dataUIwgt.py | satishgoda/learningqt,satishgoda/learningqt | pyside/pyside_basics/jamming/dataUIwgt.py | pyside/pyside_basics/jamming/dataUIwgt.py | from PySide import QtGui
class Data(object):
def __init__(self):
self.requiredNames = "A B C D E".split(' ')
self.availableActions = "Set Select Delete".split(' ')
def Set(self, name):
print "setting ", name
def Select(self, name):
print "selecting ", name
def Delete(self, name):
print "deleting ", name
class ActionButton(QtGui.QPushButton):
delegateActionSignal = QtCore.Signal((str, str))
def __init__(self, itemName, actionName, parent=None):
super(ActionButton, self).__init__(parent)
self.itemName = itemName
self.actionName = actionName
self.clicked.connect(self._delegate)
self.setText(self.actionName)
def _delegate(self):
self.delegateActionSignal.emit(self.itemName, self.actionName)
# def delegated(itemName, actionName):
# print itemName, actionName
#
# self = ActionButton('A', 'Set')
# self.delegateActionSignal.connect(delegated)
# self.show()
class DataUIWidget(QtGui.QWidget):
def __init__(self, data, parent=None):
super(DataUIWidget, self).__init__(parent)
self.data = data
self._setupUI()
def handleAction(self, itemName, actionName):
print itemName, actionName
def _setupUI(self):
layout = QtGui.QGridLayout()
self.setLayout(layout)
for index, name in enumerate(self.data.requiredNames):
lbl = QtGui.QLabel(name)
layout.addWidget(lbl, index, 0)
for ind, actName in enumerate(self.data.availableActions, 1):
btn = ActionButton(name, actName)
btn.delegateActionSignal.connect(self.handleAction)
layout.addWidget(btn, index, ind)
data = Data()
self = DataUIWidget(data)
self.show()
| mit | Python |
|
ae948c95ea0087f33f13ef3463dc022eda0301a2 | Add a solution for the MadLibs lab | google/cssi-labs,google/cssi-labs | python/labs/make-a-short-story/mystory.py | python/labs/make-a-short-story/mystory.py | # Create a function for adjectives so I don't repeat myself in prompts.
def get_adjective():
return raw_input("Give me an adjective: ")
def get_noun():
return raw_input("Give me a noun: ")
def get_verb():
return raw_input("Give me a verb: ")
adjective1 = get_adjective()
noun1 = get_noun()
verb1 = get_verb()
adjective2 = get_adjective()
noun2 = get_noun()
verb2 = get_verb()
# Use parentheses so Python will "know" the string has multiple lines
print ("At CSSI we were all " + adjective1 + " when a " + noun1 +
" fell through the ceiling. See-Mong tried to " + verb1 + " it but it " +
"was too " + adjective2 + ". Instead, Zack gave it a " + noun2 + " which " +
"caused it to " + verb2 + ".")
| apache-2.0 | Python |
|
3cf1eb01540a126ef6a38219f89a41a0f05ad63f | Format fixing | Lowingbn/iccpy | constants.py | constants.py | UNITS = "SI"
UNIT_LENGTH = 1
UNIT_MASS = 1
UNIT_TIME = 1
DEFAULT_GRAVITATIONAL_CONSTANT = 6.673e-11 # m3 kg-1 s-2
DEFAULT_SPEED_OF_LIGHT = 299792458 # m s-1
DEFAULT_SOLAR_MASS = 1.98892e30 # kg
DEFAULT_PARSEC = 3.08568025e16 # m
DEFAULT_YEAR = 31556926 # s
DEFAULT_h = 0.73
G = GRAVITATIONAL_CONSTANT = DEFAULT_GRAVITATIONAL_CONSTANT
c = SPEED_OF_LIGHT = DEFAULT_SPEED_OF_LIGHT
def set_units(units):
global UNITS
global UNIT_LENGTH, UNIT_MASS, UNIT_TIME # rebound below, so they must be declared global too
global c, SPEED_OF_LIGHT, G, GRAVITATIONAL_CONSTANT
if units=="SI":
UNIT_LENGTH = 1
UNIT_MASS = 1
UNIT_TIME = 1
elif units=="GALACTIC":
UNIT_LENGTH = (1e6 * DEFAULT_PARSEC / DEFAULT_h) # 1.0 Mpc h^-1
UNIT_MASS = (1e10 * DEFAULT_SOLAR_MASS / DEFAULT_h) # 10^10 M_solar h^-1
UNIT_TIME = (1e3 * DEFAULT_PARSEC / DEFAULT_h) # 977.8 Gyr h^-1
elif units=="CGI":
UNIT_LENGTH = 0.01
UNIT_MASS = 0.001
UNIT_TIME = 1
UNITS = units
G = GRAVITATIONAL_CONSTANT = DEFAULT_GRAVITATIONAL_CONSTANT * UNIT_MASS * UNIT_TIME**2 / UNIT_LENGTH**3
c = SPEED_OF_LIGHT = DEFAULT_SPEED_OF_LIGHT * UNIT_TIME / UNIT_LENGTH;
set_units("SI") | mit | Python |
|
476f2493576c55c0f412165e3c3ce8225599ba0a | Copy caller_checker.py | zstars/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto | server/src/voodoo/gen2/caller_checker.py | server/src/voodoo/gen2/caller_checker.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
ALL = 'All servers'
def caller_check(servers = ALL):
def func_wrapper(func):
def wrapped_func(*args, **kargs):
# TODO
# try:
# servers[0]
# except TypeError:
# all_servers = (servers,)
# else:
# all_servers = servers
#TODO: work with all_servers
return func(*args,**kargs)
wrapped_func.__name__ = func.__name__
wrapped_func.__doc__ = func.__doc__
return wrapped_func
return func_wrapper
| bsd-2-clause | Python |
|
77b34390345208a6e0bc5ad30cdce62e42ca0c56 | Add simple command to list speakers and tickets | CTPUG/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CTPUG/wafer,CarlFK/wafer,CarlFK/wafer,CarlFK/wafer | wafer/management/commands/pycon_speaker_tickets.py | wafer/management/commands/pycon_speaker_tickets.py | import sys
import csv
from optparse import make_option
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from wafer.talks.models import ACCEPTED
class Command(BaseCommand):
help = "List speakers and associated tickets."
option_list = BaseCommand.option_list + tuple([
make_option('--speakers', action="store_true", default=False,
help='List speakers and tickets (for accepted talks)'),
make_option('--allspeakers', action="store_true", default=False,
help='List speakers and tickets (for all talks)'),
])
def _speaker_tickets(self, options):
people = User.objects.filter(talks__isnull=False).distinct()
csv_file = csv.writer(sys.stdout)
for person in people:
# We query talks to filter out the speakers from ordinary
# accounts
if options['allspeakers']:
titles = [x.title for x in person.talks.all()]
else:
titles = [x.title for x in
person.talks.filter(status=ACCEPTED)]
if not titles:
continue
tickets = person.ticket.all()
if tickets:
ticket = '%d' % tickets[0].barcode
else:
ticket = 'NO TICKET PURCHASED'
row = [x.encode("utf-8") for x in (person.get_full_name(),
person.email,
ticket)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._speaker_tickets(options)
| isc | Python |
|
c2e882855ea56c265ef46646ec5e20f78d0ad064 | add migrations for missing phaselogs after fixing bulk project status updates | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/projects/migrations/0028_auto_20170619_1555.py | bluebottle/projects/migrations/0028_auto_20170619_1555.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-19 13:55
from __future__ import unicode_literals
import datetime
from django.db import migrations
def fix_phaselog_for_incorrect_project_statuses(apps, schema_editor):
"""
#BB-9886 : Fix to add new project phase status logs for projects whose status does not correspond to the last
project phase status log. We have to fake a timestamp as we don't know when the status was really updated.
"""
Project = apps.get_model('projects', 'Project')
ProjectPhaseLog = apps.get_model('projects', 'ProjectPhaseLog')
for project in Project.objects.all():
last_project_phase_log = ProjectPhaseLog.objects.filter(project=project).order_by('start').last()
if project.status != last_project_phase_log.status:
start = last_project_phase_log.start + datetime.timedelta(minutes = 1)
log = ProjectPhaseLog.objects.create(project=project, status=project.status, start=start)
log.save()
def dummy(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0027_auto_20170602_2240'),
]
operations = [
migrations.RunPython(fix_phaselog_for_incorrect_project_statuses, dummy),
]
| bsd-3-clause | Python |
|
c628e5ed57effd4386c913b0cb47884e61c7db88 | Use camera height, and display disk | fhennecker/semiteleporter,fhennecker/semiteleporter,fhennecker/semiteleporter | research/triangulation_3/Triangulation.py | research/triangulation_3/Triangulation.py |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from math import sin, cos, tan, atan, pi
from pylab import imread
from mpl_toolkits.mplot3d import Axes3D
# In[2]:
image = imread("lines1.png")
plt.imshow(image)
plt.show()
#### Position formulas
# In[3]:
def position(laser, gamma, theta, phi):
"""
laser: position (x, y, z) of the laser relative to the camera
gamma: angle between the laser and the plane orthogonal to the camera
theta: horizontal angle of the camera ray
phi : vertical angle of the camera ray
"""
# direction vector of the ray leaving the camera
ray = np.array([sin(theta), cos(theta), tan(phi)])
# Matrix such that (matrix) * (l, m, z) = -(laser), as solved below
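# Geometrically this intersects the camera ray z*ray with the plane swept
# by the laser (spanned by its direction and the vertical): l and m are
# coordinates within that plane and z is the depth along the camera ray.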
matrix = np.array([
[cos(gamma), 0, sin(theta)],
[sin(gamma), 0, cos(theta)],
[ 0, 1, tan(phi) ]
])
l, m, z = np.linalg.solve(matrix, -laser)
return z * ray
# In[4]:
CAMERA_HEIGHT = 39
PLATE_HEIGHT = 18.5
RELATIVE_HEIGHT = CAMERA_HEIGHT - PLATE_HEIGHT
CAM_DISTANCE = 53.2
def theta_phi(alpha, image_shape, position):
x, y = map(float, position)
w, h = map(float, image_shape)
ratio = w/h
beta = alpha / ratio
theta = (x - w/2)/w * alpha
phi = (h/2 - y)/h * beta
return theta, phi
#### System parameters
# In[5]:
def deg2rad(x): return pi*float(x)/180
def rad2deg(x): return 180*float(x)/pi
GAMMA_D = deg2rad(83)
GAMMA_G = deg2rad(78)
ALPHA = deg2rad(60)
LASER_G = np.array([CAM_DISTANCE * tan(pi/2-GAMMA_G), 0, 0])
LASER_D = np.array([CAM_DISTANCE * tan(pi/2-GAMMA_D), 0, 0])
# In[6]:
tuple(position(LASER_G, GAMMA_G, 0, 0)) # Should be (0, 53.2, 0)
#### Computing the point positions
# In[7]:
XYZ = []
IJ = []
H, W = image.shape[:2]
for i in range(H):
for j in range(W):
if tuple(image[i][j]) != (0, 0, 0):
IJ.append((j, i))
theta, phi = theta_phi(ALPHA/2, [W, H], [j, i])
gamma = GAMMA_G if theta < 0 else GAMMA_D
laser = LASER_G if theta < 0 else LASER_D
XYZ.append(position(laser, gamma, theta, phi))
X, Y, Z = map(np.array, zip(*XYZ))
I, J = map(np.array, zip(*IJ))
XYZ[0]
# In[8]:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, Z)
ax.plot([0, 0], [0, CAM_DISTANCE], [0, 0], color='red')
plt.xlim(-50, 50)
plt.ylim(0, 60)
plt.show()
# In[9]:
photo = imread("imgs/04.png")
h, w = photo.shape[:2]
plt.imshow(photo)
plt.scatter(I, J)
plt.plot([w/2, w/2], [0, h], 'y')
plt.show()
# In[10]:
get_ipython().magic(u'pinfo plt.grid')
# In[10]:
| mit | Python |
|
9cb5658c53a2202931e314ced3ee66714301a087 | Create _im_rot_manual_detect.py | gabru-md/faces | resources/_py_in/_im_rot_manual_detect.py | resources/_py_in/_im_rot_manual_detect.py | # PYTHON
# MANISH DEVGAN
# https://github.com/gabru-md
# Program helps in detecting faces which are
# tilted right or left! The detection is done by
# rotating the image and then trying to detect the
# potential faces in it!
#BEGIN
# importing
import cv2
import numpy as np
import os
import sys
# function to rotate the image to a specific angle begins
def rotate(img,angle):
image = np.copy(img)
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
_im = cv2.warpAffine(image, M, (nW, nH))
# a new variable is taken instead of the old one as it will then form 2 different copies
# instead of forming a reference of the object or altering the object itself
# return the rotated image to the caller
return _im
# function ends
# reading image which is to be rotated
# this image will then be further looked in for faces at different angles
image = cv2.imread('te.jpg')
cascPath = "haarcascade_frontalface_default.xml"
os.chdir('C:\Users\Manish\Desktop')
# build the face detector from the cascade file (faceCascade is used below)
faceCascade = cv2.CascadeClassifier(cascPath)
# range is taken from 0 to 360
# therefore we have range(360+1)
for i in range(361):
# new object of image type or numpy.ndarray is created and named _im
# this will have our rotated image
_im = rotate(image,i)
# converting our _im to grayscale to detect potential faces in it!
_gray = cv2.cvtColor(_im,cv2.COLOR_BGR2GRAY)
# run the detector built from the cascade specified above,
# in this case: 'haarcascade_frontalface_default.xml'
faces = faceCascade.detectMultiScale(
_gray,
scaleFactor = 1.2,
minNeighbors=1,
minSize=(15,15),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
# drawing a box around the potential faces that have been identified
for (x,y,w,h) in faces:
cv2.rectangle(_im,(x+int(w*0.18),y+int(h*0.15)),(x+int(w*0.80),y+int(h*0.90)),(0,255,0),2)
# showing the rotated image to the user!
cv2.imshow('Rotated Image',_im)
if cv2.waitKey(0) == 27:
break
#END
| bsd-3-clause | Python |
|
36ada2dc33ccb3cb1803f67a112e3559efd7e821 | Add file to initialize item endpoint - Add item_fields | Elbertbiggs360/buckelist-api | app/api/item.py | app/api/item.py | """ Routes for bucket_item Functionality"""
# from flask import g
# from flask import Blueprint, request, jsonify
from flask_restplus import fields
# from app.models.bucketlist import Bucketlist
# from app import api
item_fields = {
'id': fields.Integer,
'name': fields.String,
'date_created': fields.DateTime(attribute='created_at'),
'date_modified': fields.DateTime(attribute='modified_at'),
'done': fields.Boolean
}
| mit | Python |
|
bf53f738bb5408622b08eedb9b0b0c6f80487a0c | Create 0603_verbs_vehicles.py | boisvert42/npr-puzzle-python | 2019/0603_verbs_vehicles.py | 2019/0603_verbs_vehicles.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
NPR 2019-06-02
https://www.npr.org/2019/06/02/728600551/sunday-puzzle-lets-go-toe-to-toe?utm_medium=RSS&utm_campaign=sundaypuzzle
Think of a verb in its present and past tense forms.
Drop the first letter of each word.
The result will name two vehicles. What are they?
"""
import requests
import sys
sys.path.append('..')
import nprcommontools as nct
# URL with verb forms
URL = 'https://cdn.jsdelivr.net/gh/kulakowka/english-verbs-conjugation@master/src/services/ConjugationService/verbs.json'
r = requests.get(URL)
j = r.json()
VEHICLES = frozenset(nct.get_category_members('vehicle'))
#%%
for d in j:
verb = d[0]
past = d[1]
if past is not None:
v1 = verb[1:]
p1 = past[1:]
if v1 in VEHICLES and p1 in VEHICLES:
print(verb, past, v1, p1)
| cc0-1.0 | Python |
|
60f54674cc7bb619d5275dbd49e346ecee276ff2 | fix reload module | nanqinlang-shadowsocksr/shadowsocksr-python,nanqinlang-shadowsocksr/shadowsocksr-python | importloader.py | importloader.py | ๏ปฟ#!/usr/bin/python
# -*- coding: UTF-8 -*-
def load(name):
try:
obj = __import__(name)
reload(obj)
return obj
except:
pass
try:
import importlib
obj = importlib.__import__(name)
importlib.reload(obj)
return obj
except:
pass
def loads(namelist):
for name in namelist:
obj = load(name)
if obj is not None:
return obj
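# Example usage (hypothetical module names):
#   obj = loads(['user_config', 'config'])
# returns the first of the listed modules that can be (re)imported.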
| apache-2.0 | Python |
|
152db7b696b949c67b5121d42fba28ec31eceb47 | Create everyeno_keys.py | vgan/everyeno | everyeno_keys.py | everyeno_keys.py | tumblr_consumer_key = ''
tumblr_consumer_secret = ''
tumblr_token_key = ''
tumblr_token_secret = ''
google_developerKey = ''
twitter_consumer_key = ''
twitter_consumer_secret = ''
twitter_token_key = ''
twitter_token_secret = ''
discogs_user_token = ''
| cc0-1.0 | Python |
|
faff5fc7665abfcbcf5ab497ca533d7d3d4e53ac | Split property system into it's own file. | dednal/chromium.src,jaruba/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,junmin-zhu/chromium-rivertrail,junmin-zhu/chromium-rivertrail,junmin-zhu/chromium-rivertrail,nacl-webkit/chrome_deps,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,patrickm/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,littlstar/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,zcbenz/cefode-chromium,mogoweb/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,littlstar/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,Chilledheart/chromium,littlstar/chromium.src,nacl-webkit/chrome_deps,littlstar/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,rogerwang/chromium,mohamed--abdel-maksoud/chromium.src,keishi/chromium,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,bright-sparks/chromium-spacewalk,robclark/chromium,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,keishi/chromium,mohamed--abdel-maksoud/chromium.src,rogerwang/chromium,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,anirudhSK/chromium,junmin-zhu/chromium-rivertrail,rogerwang/chromium,robclark/chromium,junmin-zhu/chromium-rivertrail,nacl-webkit/chrome_deps,junmin-zhu/chromium-rivertrail,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk,Jonekee/chromium.src,anirudhSK/chromium,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,keishi/chromium,ondra-novak/chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,keishi/chromium,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,Fireblend/chromium-crosswalk,littlstar/chromium.src,hujiajie/pa-chromium,dednal/chromium.src,Fireblend/chromium-crosswalk,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,zcbenz/cefode-chromium,Jonekee/chromium.src,ondra-novak/chromium.src,axinging/chromium-crosswalk,rogerwang/chromium,TheTypoMaster/chromium-crosswalk,robclark/chromium,hgl888/chromium-crosswalk-efl,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,nacl-webkit/chrome_deps,hujiajie/pa-chromium,chuan9/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,junmin-zhu/chromium-rivertrail,timopulkkinen/BubbleFish,patrickm/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,fujunwei/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,ltilve/chromium,mogoweb/chromium-crosswalk,rogerwang/chromium,robclark/chromium,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,rogerwang/chromium,littlstar/chromium.src,dushu1203/chromium.src,timopulkkinen/BubbleFish,Chilledheart/chromium,rogerwang/chromium,dednal/chromium.src,dednal/chromium.src,dushu1203/chromium.src,zcbenz/cefode-chromium,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,anirudhSK/chromium,Just-D/chromium-1,Jonekee/chromium.src,fujunwei/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,anirudhSK/chromium,robclark/chromium,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,markYoungH/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,robclark/chromium,crosswalk-project/chromium-crosswalk-efl,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,keishi/chromium,nacl-webkit/chrome_deps,ondra-novak/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,Chilledheart/chromium,hujiajie/pa-chromium,timopulkkinen/BubbleFish,ondra-novak/chromium.src,M4sse/chromium.src,ltilve/chromium,timopulkkinen/BubbleFish,anirudhSK/chromium,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,timopulkkinen/BubbleFish,keishi/chromium,Jonekee/chromium.src,ltilve/chromium,littlstar/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,jaruba/chromium.src,patrickm/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,dednal/chromium.src,dednal/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,nacl-webkit/chrome_deps,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,pozdnyakov/chromium-crosswalk,ChromiumWebApps/chromium,zcbenz/cefode-chromium,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,zcbenz/cefode-chromium,patrickm/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,rogerwang/chromium,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,ltilve/chromium,zcbenz/cefode-chromium,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,fujunwei/chromium-crosswalk,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,fujunwei/chromium-crosswalk,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,zcbenz/cefode-chromium,anirudhSK/chromium,M4sse/chromium.src,Chilledheart/chromium,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,hujiajie/pa-chromium,keishi/chromium,nacl-webkit/chrome_deps,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,patrickm/chromium.src,keishi/chromium,dednal/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,mogoweb/chromium-crosswalk,chuan9/chromium-crosswalk,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,keishi/chromium,M4sse/chromium.src,hujiajie/pa-chromium,junmin-zhu/chromium-rivertrail,zcbenz/cefode-chromium,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,rogerwang/chromium,M4sse/chromium.src,jaruba/chromium.src | ppapi/generators/idl_propertynode.py | ppapi/generators/idl_propertynode.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Hierarchical property system for IDL AST """
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
#
# IDLPropertyNode
#
# A property node is a hierarchically aware system for mapping
# keys to values, such that a local dictionary is searched first,
# followed by parent dictionaries in order.
#
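# Example: a key is resolved on the node itself first; failing that, the
# parents are searched depth-first in the order they were added, so the
# nearest definition of a key wins.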
class IDLPropertyNode(object):
def __init__(self):
self.parents = []
self.property_map = {}
def Error(self, msg):
name = self.GetProperty('NAME', 'Unknown')
parents = [parent.GetProperty('NAME', '???') for parent in self.parents]
ErrOut.Log('%s [%s] : %s' % (name, ' '.join(parents), msg))
def AddParent(self, parent):
assert parent
self.parents.append(parent)
def SetProperty(self, name, val):
self.property_map[name] = val
def _GetProperty_(self, name):
# Check locally for the property, and return it if found.
prop = self.property_map.get(name, None)
if prop is not None: return prop
# If not, search the parents in order
for parent in self.parents:
prop = parent.GetProperty(name)
if prop is not None: return prop
# Otherwise, it can not be found.
return None
def GetProperty(self, name, default=None):
prop = self._GetProperty_(name)
if prop is None:
return default
else:
return prop
def GetPropertyLocal(self, name, default=None):
# Search for the property, but only locally, returning the
# default if not found.
prop = self.property_map.get(name, default)
return prop
# Regular expression to parse property keys in a string such that a string
# "My string $NAME$" will find the key "NAME".
regex_var = re.compile('(?P<src>[^\\$]+)|(?P<key>\\$\\w+\\$)')
def GetPropertyList(self):
return self.property_map.keys()
# Recursively expands text keys in the form of $KEY$ with the value
# of the property of the same name. Since this is done recursively
# one property can be defined in terms of another.
def Replace(self, text):
itr = IDLPropertyNode.regex_var.finditer(text)
out = ''
for m in itr:
(start, stop) = m.span()
if m.lastgroup == 'src':
out += text[start:stop]
if m.lastgroup == 'key':
key = text[start+1:stop-1]
val = self.GetProperty(key, None)
if not val:
self.Error('No property "%s"' % key)
out += self.Replace(str(val))
return out
#
# Testing functions
#
# Build a property node, setting the properties including a name, and
# associate the children with this new node.
#
def BuildNode(name, props, children=[], parents=[]):
node = IDLPropertyNode()
node.SetProperty('NAME', name)
for prop in props:
toks = prop.split('=')
node.SetProperty(toks[0], toks[1])
for child in children:
child.AddParent(node)
for parent in parents:
node.AddParent(parent)
return node
def ExpectProp(node, name, val):
found = node.GetProperty(name)
if found != val:
ErrOut.Log('Got property %s expecting %s' % (found, val))
return 1
return 0
#
# Verify property inheritance
#
def PropertyTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectProp(top, 'Left', 'Top')
errors += ExpectProp(top, 'Right', 'Top')
errors += ExpectProp(left, 'Left', 'Left')
errors += ExpectProp(left, 'Right', 'Top')
errors += ExpectProp(right, 'Left', 'Top')
errors += ExpectProp(right, 'Right', 'Right')
if not errors: InfoOut.Log('Passed PropertyTest')
return errors
def ExpectText(node, text, val):
found = node.Replace(text)
if found != val:
ErrOut.Log('Got replacement %s expecting %s' % (found, val))
return 1
return 0
#
# Verify text replacement
#
def ReplaceTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectText(top, '$Left$', 'Top')
errors += ExpectText(top, '$Right$', 'Top')
errors += ExpectText(left, '$Left$', 'Left')
errors += ExpectText(left, '$Right$', 'Top')
errors += ExpectText(right, '$Left$', 'Top')
errors += ExpectText(right, '$Right$', 'Right')
if not errors: InfoOut.Log('Passed ReplaceTest')
return errors
def MultiParentTest():
errors = 0
parent1 = BuildNode('parent1', ['PARENT1=parent1', 'TOPMOST=$TOP$'])
parent2 = BuildNode('parent2', ['PARENT1=parent2', 'PARENT2=parent2'])
child = BuildNode('child', ['CHILD=child'], parents=[parent1, parent2])
BuildNode('top', ['TOP=top'], children=[parent1])
errors += ExpectText(child, '$CHILD$', 'child')
errors += ExpectText(child, '$PARENT1$', 'parent1')
errors += ExpectText(child, '$PARENT2$', 'parent2')
# Verify recursive resolution
errors += ExpectText(child, '$TOPMOST$', 'top')
if not errors: InfoOut.Log('Passed MultiParentTest')
return errors
def Main():
errors = 0
errors += PropertyTest()
errors += ReplaceTest()
errors += MultiParentTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause | Python |
|
d86bdff73f2c90667c8cd07750cfc120ca8a5a7d | Add BERT example. | iree-org/iree-torch,iree-org/iree-torch | examples/bert.py | examples/bert.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch_mlir
import iree_torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
def prepare_sentence_tokens(hf_model: str, sentence: str):
tokenizer = AutoTokenizer.from_pretrained(hf_model)
return torch.tensor([tokenizer.encode(sentence)])
class OnlyLogitsHuggingFaceModel(torch.nn.Module):
"""Wrapper that returns only the logits from a HuggingFace model."""
def __init__(self, model_name: str):
super().__init__()
self.model = AutoModelForSequenceClassification.from_pretrained(
model_name, # The pretrained model name.
# The number of output labels--2 for binary classification.
num_labels=2,
# Whether the model returns attentions weights.
output_attentions=False,
# Whether the model returns all hidden-states.
output_hidden_states=False,
torchscript=True,
)
self.model.eval()
def forward(self, input):
# Return only the logits.
return self.model(input)[0]
def _suppress_warnings():
import warnings
warnings.simplefilter("ignore")
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def _get_argparse():
parser = argparse.ArgumentParser(
description="Run a HuggingFace BERT Model.")
parser.add_argument("--model-name",
default="philschmid/MiniLM-L6-H384-uncased-sst2",
help="The HuggingFace model name to use.")
parser.add_argument("--sentence",
default="The quick brown fox jumps over the lazy dog.",
help="sentence to run the model on.")
return parser
def main():
_suppress_warnings()
args = _get_argparse().parse_args()
print("Parsing sentence tokens.")
example_input = prepare_sentence_tokens(args.model_name, args.sentence)
print("Instantiating model.")
model = OnlyLogitsHuggingFaceModel(args.model_name)
# TODO: Wrap up all these steps into a convenient, well-tested API.
# TODO: Add ability to run on IREE CUDA backend.
print("Tracing model.")
traced = torch.jit.trace(model, example_input)
print("Compiling with Torch-MLIR")
linalg_on_tensors_mlir = torch_mlir.compile(traced, example_input,
output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)
print("Compiling with IREE")
iree_vmfb = iree_torch.compile_to_vmfb(linalg_on_tensors_mlir)
print("Loading in IREE")
invoker = iree_torch.load_vmfb(iree_vmfb)
print("Running on IREE")
import time
start = time.time()
result = invoker.forward(example_input)
end = time.time()
print("RESULT:", result)
print(f"Model execution took {end - start} seconds.")
if __name__ == "__main__":
main()
| apache-2.0 | Python |
|
768b6fd5f4af994ca9af1470cfcc7fa7eb216a8f | Add a binding.gyp file. | karatheodory/ws,websockets/ws,guymguym/ws | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'validation',
'cflags': [ '-O3' ],
'sources': [ 'src/validation.cc' ]
},
{
'target_name': 'bufferutil',
'cflags': [ '-O3' ],
'sources': [ 'src/bufferutil.cc' ]
}
]
}
| mit | Python |
|
3bbf06964452683d986db401556183f575d15a55 | Add script for inserting project into DB | muzhack/muzhack,muzhack/muzhack,praneybehl/muzhack,muzhack/musitechhub,praneybehl/muzhack,praneybehl/muzhack,muzhack/musitechhub,praneybehl/muzhack,muzhack/muzhack,muzhack/muzhack,muzhack/musitechhub,muzhack/musitechhub | insert-project.py | insert-project.py | #!/usr/bin/env python3
import pymongo
import subprocess
import re
from datetime import datetime
import argparse
from json import load as load_json
import sys
def _info(msg):
sys.stdout.write(msg + '\n')
sys.stdout.flush()
cl_parser = argparse.ArgumentParser(description='Insert a project into Meteor\'s local MongoDB')
cl_parser.add_argument('input', help='JSON input file')
cl_parser.add_argument('--site', default=None, help='Specify Meteor site (default: localhost)')
args = cl_parser.parse_args()
with open(args.input) as input_file:
json = load_json(input_file)
command = ['meteor', 'mongo', '-U']
if args.site:
command.append(args.site)
_info('Getting Mongo URL...')
mongo_url = subprocess.check_output(command).decode().strip()
mongo_url, db_name = mongo_url.rsplit('/', 1)
_info('Connecting to MongoDB: {} (DB: {})'.format(mongo_url, db_name))
client = pymongo.MongoClient(mongo_url)
db = client[db_name]
project = {
'created': datetime.utcnow(),
'owner': json['owner'],
'projectId': json['id'],
'tags': json['tags'],
'text': json['description'],
'title': json['title'],
'instructions': json['instructions'],
'pictures': json['pictures'],
'files': json['files'],
'license': json['license'],
}
db.projects.update({'owner': project['owner'], 'projectId': project['projectId']}, project,
upsert=True)
_info('Successfully inserted project \'{}/{}\' ({})'.format(
project['owner'],
project['projectId'],
project['title'],
))
| mit | Python |
|
28e483c32d3e946f0f9159fe7459531f284d50aa | Add shared counter support to cache. | MapofLife/MOL,MapofLife/MOL,MapofLife/MOL,MapofLife/MOL,MapofLife/MOL,MapofLife/MOL,MapofLife/MOL,MapofLife/MOL | app/molcounter.py | app/molcounter.py | from google.appengine.api import memcache
from google.appengine.ext import db
import random
import collections
import logging
class GeneralCounterShardConfig(db.Model):
"""Tracks the number of shards for each named counter."""
name = db.StringProperty(required=True)
num_shards = db.IntegerProperty(required=True, default=20)
class GeneralCounterShard(db.Model):
"""Shards for each named counter"""
name = db.StringProperty(required=True)
count = db.IntegerProperty(required=True, default=0)
def get_top_names(top_count, all_results):
logging.info('%s from request' % top_count)
d = collections.defaultdict(list)
for counter in GeneralCounterShard.all():
d[counter.name.split('-')[-1]].append(counter.count)
results = {}
for name, counts in d.iteritems():
results[name] = reduce(lambda x,y: x+y, counts)
top = {}
x = collections.defaultdict(list)
for name, count in results.iteritems():
x[count].append(name)
keys = x.keys()
keys.sort()
keys.reverse()
tc = top_count
for k in keys:
if top_count > 0:
logging.info(top_count)
top[reduce(lambda x,y: '%s,%s' % (x,y), x[k])] = k
top_count -= 1
else:
break
logging.info(top)
if all_results:
return {'top-%s' % tc: top, 'results': results}
else:
return {'top-%s' % tc: top}
def get_count(name):
"""Retrieve the value for a given sharded counter.
Parameters:
name - The name of the counter
"""
total = memcache.get(name)
if total is None:
total = 0
for counter in GeneralCounterShard.all().filter('name = ', name):
total += counter.count
memcache.add(name, total, 60)
return total
def increment(name):
"""Increment the value for a given sharded counter.
Parameters:
name - The name of the counter
"""
config = GeneralCounterShardConfig.get_or_insert(name, name=name)
def txn():
index = random.randint(0, config.num_shards - 1)
shard_name = name + str(index)
counter = GeneralCounterShard.get_by_key_name(shard_name)
if counter is None:
counter = GeneralCounterShard(key_name=shard_name, name=name)
counter.count += 1
counter.put()
db.run_in_transaction(txn)
# does nothing if the key does not exist
memcache.incr(name)
def increase_shards(name, num):
"""Increase the number of shards for a given sharded counter.
Will never decrease the number of shards.
Parameters:
name - The name of the counter
num - How many shards to use
"""
config = GeneralCounterShardConfig.get_or_insert(name, name=name)
def txn():
if config.num_shards < num:
config.num_shards = num
config.put()
db.run_in_transaction(txn)
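# Example usage from handler code (counter name is hypothetical):
#   increment('species-views')
#   total = get_count('species-views')
#   increase_shards('species-views', 40)  # if the counter becomes contended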
| bsd-3-clause | Python |
|
2c6700d7a16ec7e76847f3664655aaf6c8f171eb | Create test_servo5v.py | somchaisomph/RPI.GPIO.TH | test/test_servo5v.py | test/test_servo5v.py | from gadgets.motors.servos import Servo5V
import time
import random
servo = Servo5V(pin_number=12,freq=100)
count = 0
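# sweep from 0 to 180 degrees in 5-degree steps (write() presumably takes degrees)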
while count < 185:
time.sleep(0.1)
servo.write(count)
count += 5
servo.cleanup()
| mit | Python |
|
7c9c95795dbbc5f64b532720f5749b58361c222b | add collector for http://www.dshield.org/ | spantons/attacks-pages-collector | collectors/dshield.py | collectors/dshield.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import requests
import ipwhois
from pprint import pprint
def get_url(url):
try:
res = requests.get(url)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("DNS lookup failures")
else:
if res.status_code != 200:
raise requests.exceptions.ConnectionError(
"the {}, answer with {} error".format(url, res.status_code))
return res
def get_ip(name):
attempts = 5
ip = "undefined"
while attempts:
try:
data = socket.gethostbyname_ex(name)
ip = data[2][0]
break
except (socket.herror, socket.gaierror):
attempts -= 1
return ip
def get_who_is_and_country(ip):
try:
ip_obj = ipwhois.IPWhois(ip)
who_is = ip_obj.lookup(retry_count=5)
return str(who_is), who_is['asn_country_code']
except ipwhois.exceptions.IPDefinedError:
return "Private-Use Networks", "undefined"
except ipwhois.exceptions.WhoisLookupError:
return "undefined", "undefined"
def gather():
attack_type = 'undefined'
base_url = "http://www.dshield.org/feeds/suspiciousdomains_High.txt"
res = get_url(base_url)
for line in res.iter_lines():
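# The feed is plain text: '#' comment lines, a 'Site' header row, then one
# domain per line (occasionally tab-terminated), handled by the checks below.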
if line[:1] == "#" or line in ("Site", ""):
continue
host = line
if host[-1] == "\t":
host = line[:-1]
ip_address = get_ip(host)
if ip_address == "undefined":
who_is, country = "undefined", "undefined"
else:
who_is, country = get_who_is_and_country(ip_address)
doc = {
'IP': ip_address,
'SourceInfo': base_url,
'Type': attack_type,
'Country': country,
'Domain': host,
'URL': host,
'WhoIsInfo': who_is,
}
pprint(doc)
if __name__ == '__main__':
gather()
| mit | Python |
|
8fe73523b7141f93d8523e56a7c6a5cc2ed82051 | Test case for ioddrivesnmp class | datafiniti/Diamond,stuartbfox/Diamond,joel-airspring/Diamond,disqus/Diamond,Netuitive/Diamond,metamx/Diamond,cannium/Diamond,hvnsweeting/Diamond,MichaelDoyle/Diamond,MediaMath/Diamond,Precis/Diamond,anandbhoraskar/Diamond,socialwareinc/Diamond,joel-airspring/Diamond,h00dy/Diamond,mfriedenhagen/Diamond,socialwareinc/Diamond,CYBERBUGJR/Diamond,tuenti/Diamond,Netuitive/netuitive-diamond,tellapart/Diamond,Ssawa/Diamond,metamx/Diamond,Ensighten/Diamond,codepython/Diamond,jaingaurav/Diamond,EzyInsights/Diamond,cannium/Diamond,thardie/Diamond,tusharmakkar08/Diamond,socialwareinc/Diamond,actmd/Diamond,Slach/Diamond,eMerzh/Diamond-1,signalfx/Diamond,mzupan/Diamond,codepython/Diamond,sebbrandt87/Diamond,anandbhoraskar/Diamond,janisz/Diamond-1,works-mobile/Diamond,skbkontur/Diamond,Nihn/Diamond-1,Ormod/Diamond,disqus/Diamond,mzupan/Diamond,works-mobile/Diamond,socialwareinc/Diamond,dcsquared13/Diamond,krbaker/Diamond,TAKEALOT/Diamond,hamelg/Diamond,codepython/Diamond,MediaMath/Diamond,Netuitive/netuitive-diamond,Basis/Diamond,acquia/Diamond,TAKEALOT/Diamond,Nihn/Diamond-1,saucelabs/Diamond,saucelabs/Diamond,TinLe/Diamond,timchenxiaoyu/Diamond,actmd/Diamond,Basis/Diamond,acquia/Diamond,Ssawa/Diamond,eMerzh/Diamond-1,jriguera/Diamond,dcsquared13/Diamond,timchenxiaoyu/Diamond,zoidbergwill/Diamond,TinLe/Diamond,thardie/Diamond,thardie/Diamond,tellapart/Diamond,jumping/Diamond,Ensighten/Diamond,works-mobile/Diamond,ceph/Diamond,szibis/Diamond,dcsquared13/Diamond,MediaMath/Diamond,datafiniti/Diamond,szibis/Diamond,hamelg/Diamond,Clever/Diamond,works-mobile/Diamond,eMerzh/Diamond-1,Slach/Diamond,metamx/Diamond,CYBERBUGJR/Diamond,Ssawa/Diamond,Ensighten/Diamond,cannium/Diamond,Basis/Diamond,russss/Diamond,python-diamond/Diamond,EzyInsights/Diamond,bmhatfield/Diamond,MediaMath/Diamond,Ormod/Diamond,skbkontur/Diamond,eMerzh/Diamond-1,russss/Diamond,jumping/Diamond,MichaelDoyle/Diamond,mzupan/Diamond,ceph/Diamond,ceph/Diamond,Netuitive/Diamond,tusharmakkar08/Diamond,Ensighten/Diamond,h00dy/Diamond,EzyInsights/Diamond,Netuitive/netuitive-diamond,janisz/Diamond-1,Nihn/Diamond-1,rtoma/Diamond,jriguera/Diamond,skbkontur/Diamond,TAKEALOT/Diamond,ceph/Diamond,Clever/Diamond,tusharmakkar08/Diamond,Ormod/Diamond,sebbrandt87/Diamond,timchenxiaoyu/Diamond,russss/Diamond,Slach/Diamond,python-diamond/Diamond,python-diamond/Diamond,Netuitive/Diamond,h00dy/Diamond,dcsquared13/Diamond,sebbrandt87/Diamond,tusharmakkar08/Diamond,saucelabs/Diamond,jriguera/Diamond,ramjothikumar/Diamond,szibis/Diamond,ramjothikumar/Diamond,skbkontur/Diamond,janisz/Diamond-1,tuenti/Diamond,Clever/Diamond,tuenti/Diamond,mfriedenhagen/Diamond,h00dy/Diamond,zoidbergwill/Diamond,jumping/Diamond,MichaelDoyle/Diamond,actmd/Diamond,stuartbfox/Diamond,TinLe/Diamond,Ormod/Diamond,gg7/diamond,rtoma/Diamond,tellapart/Diamond,jaingaurav/Diamond,hvnsweeting/Diamond,timchenxiaoyu/Diamond,cannium/Diamond,Clever/Diamond,datafiniti/Diamond,jaingaurav/Diamond,tuenti/Diamond,jriguera/Diamond,MichaelDoyle/Diamond,sebbrandt87/Diamond,krbaker/Diamond,acquia/Diamond,Slach/Diamond,zoidbergwill/Diamond,gg7/diamond,anandbhoraskar/Diamond,disqus/Diamond,bmhatfield/Diamond,ramjothikumar/Diamond,acquia/Diamond,CYBERBUGJR/Diamond,hamelg/Diamond,gg7/diamond,Precis/Diamond,joel-airspring/Diamond,thardie/Diamond,datafiniti/Diamond,Precis/Diamond,Basis/Diamond,signalfx/Diamond,hvnsweeting/Diamond,signalfx/Diamond,rtoma/Diamond,krbaker/Diamond,Netuitive/Diamond,gg7/diamond,stuartbfox/Diamond,mfriedenhagen/
Diamond,EzyInsights/Diamond,Nihn/Diamond-1,tellapart/Diamond,bmhatfield/Diamond,szibis/Diamond,signalfx/Diamond,zoidbergwill/Diamond,mzupan/Diamond,bmhatfield/Diamond,janisz/Diamond-1,krbaker/Diamond,codepython/Diamond,TAKEALOT/Diamond,jumping/Diamond,CYBERBUGJR/Diamond,ramjothikumar/Diamond,hvnsweeting/Diamond,hamelg/Diamond,saucelabs/Diamond,Netuitive/netuitive-diamond,stuartbfox/Diamond,TinLe/Diamond,rtoma/Diamond,jaingaurav/Diamond,joel-airspring/Diamond,anandbhoraskar/Diamond,Precis/Diamond,russss/Diamond,Ssawa/Diamond,mfriedenhagen/Diamond,actmd/Diamond | src/collectors/iodrivesnmp/test/testiodrivesnmp.py | src/collectors/iodrivesnmp/test/testiodrivesnmp.py | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from iodrivesnmp import IODriveSNMPCollector
class TestIODriveSNMPCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('IODriveSNMPCollector', {
'allowed_names': allowed_names,
'interval': 1
})
self.collector = IODriveSNMPCollector(config, None)
def test_import(self):
self.assertTrue(IODriveSNMPCollector)
| mit | Python |
|
2460bd91632da0e6b02e0faf379fe27b273575bc | Add rotate.py | bm5w/practice | rotate.py | rotate.py | """Funtion to rotate image 90 degress."""
def rotate(matrix):
# Minimal sketch assuming a 90-degree clockwise rotation: reverse the
# rows, then transpose, e.g. [[1, 2], [3, 4]] -> [[3, 1], [4, 2]]
return [list(row) for row in zip(*matrix[::-1])] | mit | Python |
|
7a068872a071af2e60bf24ca7a00b3f1e999f139 | add request builder | nittyan/QiitaAPI | builders.py | builders.py | # -*- coding: utf-8 -*-
import json
class PostBuilder(object):
def __init__(self):
self.parameters = {
'title': '',
'body': '',
'coediting': False,
'gist': False,
'private': False,
'tags': [],
'tweet': False
}
def body(self, content):
"""
Args:
content: str
"""
self.parameters['body'] = content
return self
def coediting(self, flag):
"""
Args:
flag: bool
"""
self.parameters['coediting'] = flag
return self
def gist(self, flag):
"""
Args:
flag: bool
"""
self.parameters['gist'] = flag
return self
def private(self, flag):
"""
Args:
flag: bool
"""
self.parameters['private'] = flag
return self
def tags(self, t):
"""
Args:
t: list[dict]
example : {"name": "tag_name", "versions": ["1.0"]}
"""
self.parameters['tags'] = t
return self
def title(self, t):
self.parameters['title'] = t
return self
def tweet(self, flag):
self.parameters['tweet'] = flag
return self
def __str__(self):
return json.dumps(self.parameters)
def encode(self):
"""
Returns:
UTF-8 encoded request JSON string
"""
return str(self).encode('utf-8')
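# Example usage (hypothetical values), relying on the chainable setters:
#   payload = PostBuilder().title('Hello').body('world').private(True).encode()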
| mit | Python |
|
857a5cb7effa03e9cd700fa69ae4d3b231212754 | Create business.py | getlucas/flaming-lucas,getlucas/flaming-lucas,getlucas/flaming-lucas,getlucas/flaming-lucas | business.py | business.py | # business logic here
# - account managing
# - create
# - edit
# - delete
# - payment data -> tokens
# - scripts running
# - statistics
| mit | Python |
|
4a45256b614ebf8a8455562b63c1d50ec1521c71 | add a test class for auth.py | longaccess/bigstash-python,longaccess/bigstash-python | BigStash/t/test_auth.py | BigStash/t/test_auth.py | from mock import Mock
from testtools.matchers import Contains
from testtools import TestCase
class AuthTest(TestCase):
def setUp(self):
super(AuthTest, self).setUp()
def tearDown(self):
super(AuthTest, self).tearDown()
def _makeit(self, *args, **kwargs):
from BigStash.auth import Auth
return Auth(*args, **kwargs)
def test_auth_class(self):
assert self._makeit(self.getUniqueString(),
self.getUniqueString(),
self.getUniqueString())
def test_do_login(self):
requests = Mock()
requests.post.return_value = self.getUniqueString()
api_key = self.getUniqueString()
api_secret = self.getUniqueString()
url = self.getUniqueString()
auth = self._makeit(api_key, api_secret, url)
self.assertThat(auth.GetAPIKey(),
Contains('authentication succesfull'))
| apache-2.0 | Python |
|
c212d1c25095f3b6e2f88cfccdc5c49280b22be0 | Add test for tilequeue changes related to #1387. | mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource | integration-test/1387-business-and-spur-routes.py | integration-test/1387-business-and-spur-routes.py | from . import FixtureTest
class BusinessAndSpurRoutes(FixtureTest):
def test_first_capitol_dr_i70_business(self):
self.load_fixtures([
'https://www.openstreetmap.org/relation/1933234',
])
# check that First Capitol Dr, part of the above relation, is given
# a network that includes the "business" extension.
self.assert_has_feature(
16, 16294, 25097, 'roads',
{'id': 12276055, 'shield_text': '70', 'network': 'US:I:Business'})
| mit | Python |
|
672210c3af1a1b56a145b5265e5f316a1f6f36df | Add test folder | nitsas/py3utils | py3utils/test/__init__.py | py3utils/test/__init__.py | mit | Python |
||
7ec15caf8f2c9d0a21581261a356f6decc548061 | Add some basic UI tests | spacewiki/spacewiki,tdfischer/spacewiki,spacewiki/spacewiki,spacewiki/spacewiki,tdfischer/spacewiki,tdfischer/spacewiki,tdfischer/spacewiki,spacewiki/spacewiki | test/ui_test.py | test/ui_test.py | from app import app
import unittest
class UiTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def test_index(self):
self.assertEqual(self.app.get('/').status_code, 200)
def test_no_page(self):
self.assertEqual(self.app.get('/missing-page').status_code, 200)
def test_all_pages(self):
self.assertEqual(self.app.get('/.all-pages').status_code, 200)
def test_edit(self):
self.assertEqual(self.app.get('/.edit/Index').status_code, 200)
self.assertEqual(self.app.get('/.edit/').status_code, 404)
| agpl-3.0 | Python |
|
59cc25693f2185ddfe36370d7f6641b2795d4798 | Test File Upload | boris-p/ladybug,boris-p/ladybug | ladybug/test.py | ladybug/test.py | import epw
from comfort.pmv import PMV
| agpl-3.0 | Python |
|
d1b2d330d2a43814d89c7f17a347e425c434957d | Add Eoin's resampling function. | willu47/pyrate,UCL-ShippingGroup/pyrate,UCL-ShippingGroup/pyrate,willu47/pyrate | pyrate/tools/resampler.py | pyrate/tools/resampler.py | import pandas as pd
import numpy
# Does the resampling
# Called internally, one of the wrapper functions should be called if its needed
######################
def convert_messages_to_hourly_bins(df,period='H',fillnans=False,run_resample=True):
if df.empty:
return df
if run_resample:
speed_ts=df.sog.resample(period,how='mean')
draught_ts=df.draught.resample(period,how=numpy.max)
df_new=pd.DataFrame({'sog':speed_ts,'draught':draught_ts})
for col in df.columns:
if col != 'sog' and col!='draught':
df_new[col]=df[col].resample(period,how='first')
else:
df_new=[]
#set the time equal to the index
df_new['time']=df_new.index.values
# fill forward
if fillnans:
#forward fill first
df_new=df_new.fillna(method='pad')
#now backward fill for remain
df_new=df_new.fillna(method='bfill')
else:
#remove all entries where there are nans in speed
df_new=df_new.ix[pd.isnull(df_new.sog)==False]
return df_new
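# Example (hypothetical variable): convert_messages_to_hourly_bins(track_df)
# expects a time-indexed DataFrame with at least 'sog' and 'draught' columns.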
| mit | Python |
|
3b064d6933ef7e910fab5634420358562866f1bc | Add test | JokerQyou/pitools | tests/test_camera.py | tests/test_camera.py | # coding: utf-8
from __future__ import unicode_literals
import unittest
import tempfile
import shutil
from flask import Flask
from pitools import camera
app = Flask(__name__)
app.register_blueprint(camera.blueprint)
class CameraTestCase(unittest.TestCase):
def setUp(self):
self.workspace = tempfile.mkdtemp()
self.app = app.test_client()
def tearDown(self):
shutil.rmtree(self.workspace)
def test_post_shot_api(self):
'''
Should fail with 405 method not allowed
'''
rv = self.app.post('/camera/shot')
assert 405 == rv.status_code
def test_get_shot_api(self):
'''
Should return a image with image/* MIME
'''
rv = self.app.get('/camera/shot')
assert rv.content_type.startswith('image/')
print dir(rv)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
|
7ddfb39256229aa8c985ed8d70a29479187c76ad | Create script for beta invites | HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily | lily/management/commands/generate_beta_invites.py | lily/management/commands/generate_beta_invites.py | import csv
import gc
import logging
from datetime import date
from hashlib import sha256
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse_lazy
from lily.tenant.models import Tenant
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, **kwargs):
current_site = 'app.hellolily.com'
with default_storage.open('beta_signups_with_invites.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',')
spamwriter.writerow(['company', 'email', 'first_name', 'last_name', 'invite', 'country'])
for row in self.read_csvfile('beta_signups.csv'):
company = row['company']
first_name = row['first_name']
last_name = row['last_name']
email = row['email']
country = row['country']
date_string = date.today().strftime('%d%m%Y')
tenant = Tenant.objects.create(name=company, country=country)
call_command('create_tenant', tenant=tenant.id)
invite_hash = sha256('%s-%s-%s-%s' % (
tenant.id,
email,
date_string,
settings.SECRET_KEY
)).hexdigest()
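# The digest presumably lets the accept view verify the link statelessly
# by recomputing sha256 over the same (tenant id, email, date, SECRET_KEY).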
invite_link = '%s://%s%s' % ('https', current_site, reverse_lazy('invitation_accept', kwargs={
'tenant_id': tenant.id,
'first_name': first_name,
'email': email,
'date': date_string,
'hash': invite_hash,
}))
spamwriter.writerow([company, email, first_name, last_name, invite_link, country])
gc.collect()
def read_csvfile(self, file_name):
"""
Read from path assuming it's a file with ';' separated values.
"""
# Open the file in universal-newline mode ('rU') so mixed newlines do not break csv parsing.
csv_file = default_storage.open(file_name, 'rU')
reader = csv.DictReader(csv_file, delimiter=';', quoting=csv.QUOTE_ALL)
for row in reader:
yield row
| agpl-3.0 | Python |
|
5bc089a98bf578fd0c56e3e50cf76888ee74aba2 | Add py solution for 537. Complex Number Multiplication | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/complex-number-multiplication.py | py/complex-number-multiplication.py | import re
class Solution(object):
def complexNumberMultiply(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
pat = re.compile(r'(-?\d+)\+(-?\d+)i')
mata = pat.match(a)
matb = pat.match(b)
a = int(mata.group(1)), int(mata.group(2))
b = int(matb.group(1)), int(matb.group(2))
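# (a0 + a1*i) * (b0 + b1*i) = (a0*b0 - a1*b1) + (a1*b0 + a0*b1)*i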
ans = a[0] * b[0] - a[1] * b[1], a[1] * b[0] + a[0] * b[1]
return '%d+%di' % ans
| apache-2.0 | Python |
|
400ad736a271946569efa438e8fc9d00a7ce0075 | test for #22 | fopina/tgbotplug | tests/test_issues.py | tests/test_issues.py | from tgbot import plugintest
from tgbot.botapi import Update
from test_plugin import TestPlugin
class TestPluginTest(plugintest.PluginTestCase):
def setUp(self):
self.plugin = TestPlugin()
self.bot = self.fake_bot(
'',
plugins=[self.plugin],
)
self.received_id = 1
def test_user_update(self):
"""Test for issue #22"""
sender = {
'id': 1,
'first_name': 'John',
'last_name': 'Doe',
}
self.receive_message('test', sender=sender)
self.assertEqual(self.bot.models.User.get(self.bot.models.User.id == 1).first_name, 'John')
sender['first_name'] = 'Paul'
self.receive_message('test', sender=sender)
self.assertEqual(self.bot.models.User.get(self.bot.models.User.id == 1).first_name, 'Paul')
def receive_message(self, text, sender=None, chat=None, reply_to_message_id=None):
if sender is None:
sender = {
'id': 1,
'first_name': 'John',
'last_name': 'Doe',
}
if chat is None:
chat = {'type': 'private'}
chat.update(sender)
reply_to_message = None
if reply_to_message_id is not None:
reply_to_message = {
'message_id': reply_to_message_id,
'chat': chat,
}
self.bot.process_update(
Update.from_dict({
'update_id': self.received_id,
'message': {
'message_id': self.received_id,
'text': text,
'chat': chat,
'from': sender,
'reply_to_message': reply_to_message,
}
})
)
self.received_id += 1
| mit | Python |
|
06e82c471afa83bf0f08f0779b32dd8a09b8d1ba | Add py solution for 350. Intersection of Two Arrays II | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/intersection-of-two-arrays-ii.py | py/intersection-of-two-arrays-ii.py | from collections import Counter
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
c1, c2 = Counter(nums1), Counter(nums2)
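# '&' on Counters keeps the minimum multiplicity of each value, and
# elements() expands that multiset back into a flat list.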
return list((c1 & c2).elements())
| apache-2.0 | Python |
|
742e827178ee28663699acbb4a5f0ad5440649fc | add new keyboard_locks module | valdur55/py3status,ultrabug/py3status,guiniol/py3status,ultrabug/py3status,ultrabug/py3status,alexoneill/py3status,vvoland/py3status,valdur55/py3status,Andrwe/py3status,tobes/py3status,tobes/py3status,valdur55/py3status,docwalter/py3status,Andrwe/py3status,guiniol/py3status | py3status/modules/keyboard_locks.py | py3status/modules/keyboard_locks.py | # -*- coding: utf-8 -*-
"""
Monitor CapsLock, NumLock, and ScrLock keys
NumLock: Allows the user to type numbers by pressing the keys on the number pad,
rather than having them act as up, down, left, right, page up, end, and so forth.
CapsLock: When enabled, letters the user types will be in uppercase by default
rather than lowercase.
ScrLock: In some applications, such as spreadsheets, the lock mode is used to
change the behavior of the cursor keys to scroll the document instead of the cursor.
Configuration parameters:
cache_timeout: refresh interval for this module (default 1)
icon_capslock_off: show when Caps Lock is off (default 'CAP')
icon_capslock_on: show when Caps Lock is on (default 'CAP')
icon_numlock_off: show when Num Lock is off (default 'NUM')
icon_numlock_on: show when Num Lock is on (default 'NUM')
icon_scrlock_off: show when Scroll Lock is off (default 'SCR')
icon_scrlock_on: show when Scroll Lock is on (default 'SCR')
Color options:
color_good: Lock on
color_bad: Lock off
@author lasers
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 1
icon_capslock_off = "CAP"
icon_capslock_on = "CAP"
icon_numlock_off = "NUM"
icon_numlock_on = "NUM"
icon_scrlock_off = "SCR"
icon_scrlock_on = "SCR"
def keyboard_lock(self):
out = self.py3.command_output('xset -q')
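# 'xset -q' reports each indicator as e.g. 'Caps Lock:   off'; slice the
# six characters after each label and look for the word 'on'.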
capslock_color = self.py3.COLOR_BAD
capslock_icon = self.icon_capslock_off
numlock_color = self.py3.COLOR_BAD
numlock_icon = self.icon_numlock_off
scrlock_color = self.py3.COLOR_BAD
scrlock_icon = self.icon_scrlock_off
if 'on' in out.split("Caps Lock:")[1][0:6]:
capslock_color = self.py3.COLOR_GOOD
capslock_icon = self.icon_capslock_on
if 'on' in out.split("Num Lock:")[1][0:6]:
numlock_color = self.py3.COLOR_GOOD
numlock_icon = self.icon_numlock_on
if 'on' in out.split("Scroll Lock:")[1][0:6]:
scrlock_color = self.py3.COLOR_GOOD
scrlock_icon = self.icon_scrlock_on
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'composite': [
{
'color': capslock_color,
'full_text': capslock_icon,
},
{
'full_text': ' '
},
{
'color': numlock_color,
'full_text': numlock_icon,
},
{
'full_text': ' '
},
{
'color': scrlock_color,
'full_text': scrlock_icon,
},
]
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | Python |
|
3ff7c739cfc688c757396c465799ab42638c4a80 | Add toopher-pair utility | toopher/toopher-pam,toopher/toopher-pam,toopher/toopher-pam | toopher-pair.py | toopher-pair.py | import StringIO
import getpass
import argparse
import signal
import time
import os
import sys
import configobj
import toopher
import validate
from common import *
TIMEOUT_PAIRING = 30
DEFAULT_USER_CONFIG_FILE = StringIO.StringIO("""\
# This is a user-specific Toopher configuration file. See toopher_config (5)
# for more information. This file provides a way for users to customize the
# behavior of their authentication flows which are configured to use the Toopher
# PAM module.
# Pairings: This section lists known pairing identifiers issued by the Toopher API
# server for hosts to use for pairings identification when making an authentication
# request. The values here may be populated using the 'toopher-pair' utility.
[Pairings]
""")
def sigint_handler(sig, frame):
sys.exit("Cancelled by user (Ctrl-C)")
signal.signal(signal.SIGINT, sigint_handler)
def main():
argparser = argparse.ArgumentParser(description="Pair an account with the Toopher app")
argparser.add_argument('--user', '-u', default='',
help="the user to pair (defaults to the current user, specifying a user is usually only useful for the superuser)")
argparser.add_argument('--shared', '-s', action="store_true",
help="create a shared pairing instead of a host-specific pairing")
argparser.add_argument('--phrase', '-p', metavar="PAIRING_PHRASE",
help="a pairing phrase generated by the Toopher app (will prompt if not supplied)")
args = argparser.parse_args()
# Retrieve API credentials from system configuration file
try:
system_config = get_system_config()
api = get_api_object(system_config)
except Exception:
sys.exit("Could not read the Toopher system config, please request that your administrator configure the system for Toopher.")
username = args.user
if not username:
username = getpass.getuser()
user_config_filename = get_user_config_filename(username)
try:
user_config = get_user_config(username)
except IOError: # Does not exist or cannot read
if os.path.exists(user_config_filename): # Exists but can't be read
sys.exit("Could not read user's Toopher config file ('%s')" % user_config_filename)
else: # Does not exist, make sure we can write it if we try
if not os.access(os.path.dirname(user_config_filename), os.W_OK):
sys.exit("Missing write permissions for the user's Toopher config file ('%s')" % user_config_filename)
user_config = configobj.ConfigObj(DEFAULT_USER_CONFIG_FILE)
user_config.filename = user_config_filename
except configobj.ConfigObjError, e: # Could not parse
sys.exit("Malformed configuration file ('%s'): %s" % (user_config_filename, e))
except validate.ValidateError, e: # Did not validate against spec
sys.exit("Problem validating user configuration file ('%s'):\n"
"%s\n"
"Please fix or remove user configuration file and try again."
% (user_config_filename, e))
else: # Exists, readable, parseable, and valid - make sure we can write it if we try
if not os.access(user_config_filename, os.W_OK):
sys.exit("Missing write permissions for the user's Toopher config file ('%s')" % user_config_filename)
phrase = args.phrase
while True:
if not phrase:
phrase = raw_input("Enter a pairing phrase (generated by the Toopher app): ")
if not phrase:
print 'Invalid pairing phrase, please try again.'
else:
break
full_username = get_full_username(username, args.shared)
try:
sys.stdout.write('Contacting server to perform pairing... ')
sys.stdout.flush()
pairing = api.pair(phrase, full_username)
print 'done.'
except Exception as error:
print 'error.'
sys.exit('This user could not be paired due to an error: %s' % error)
print 'Your Toopher app should now ask you to approve this pairing request. Please respond to continue.'
sys.stdout.write('Checking status..')
sys.stdout.flush()
start_time = time.time()
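    # poll until the user approves or denies in the app, or TIMEOUT_PAIRING seconds elapse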
    while time.time() - start_time < TIMEOUT_PAIRING:
sys.stdout.write(".")
sys.stdout.flush()
pairing_status = api.get_pairing_status(pairing.id)
if not pairing_status.pending:
if pairing_status.enabled:
print ' pairing approved.'
break
else:
print ' pairing denied.'
sys.exit('This pairing request was denied by the Toopher app user.')
        time.sleep(1)
    else:  # the loop ran to completion without an approve/deny response
        sys.exit('\nTimed out waiting for a response from the Toopher app.')
pairing_key = USER_CONFIG_PAIRINGS_KEY_SHARED_PAIRING_ID if args.shared else HOSTNAME
user_config[USER_CONFIG_PAIRINGS_SECTION][pairing_key] = pairing.id
user_config.write()
print 'Pairing successful - PAM services configured to use the Toopher module will now use this pairing to authenticate.'
if __name__ == '__main__':
try:
main()
except Exception, e:
sys.exit("An unexpected error was encountered. Please contact support@toopher.com for resolution: (Error: %s)"
% e) | epl-1.0 | Python |
|
9d7c348170fc0f9d339a2ef57a9e64b1ceaa7516 | Add demo MNH event scraper | andrewgleave/whim,andrewgleave/whim,andrewgleave/whim | web/whim/core/scrapers/mnh.py | web/whim/core/scrapers/mnh.py | from datetime import datetime, timezone, time
import requests
from bs4 import BeautifulSoup
from django.db import transaction
from .base import BaseScraper
from .exceptions import ScraperException
from whim.core.models import Event, Source, Category
from whim.core.utils import get_object_or_none
from whim.core.time import zero_time_with_timezone
class MNHScraper(BaseScraper):
def get_data(self):
url = "https://manxnationalheritage.im/whats-on/"
parsed = []
page = requests.get(url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, 'html.parser')
events = soup.select(
"div.columns.no-padding-grid.push-top-m > div > a")
parsed = []
for e in events:
tmp = {
"link": e.get('href'),
"category": e.find("span", {"class": "badge"}).string
}
#get rest of data
article = e.find("div", {"class": "text"})
if article:
tmp["name"] = article.contents[0].string #h2
tmp["description"] = article.contents[3].contents[
0].string #p
#dates
try:
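                        # date text presumably looks like '01/01/2018 – 31/12/2018';
                        # strip spaces, normalise the dash, then split into start/end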
                        dates = article.contents[2].contents[0].string.replace(
                            " ", "").replace("–", "-").split("-")  # span
tmp["start_date"] = zero_time_with_timezone(
datetime.strptime(dates[0], "%d/%m/%Y"))
if len(dates) > 1:
tmp["end_date"] = zero_time_with_timezone(
datetime.strptime(dates[1], "%d/%m/%Y"))
                    except Exception:  # skip events whose dates cannot be parsed
continue
parsed.append(tmp)
return parsed
else:
raise ScraperException("Unexpected status code")
@transaction.atomic
def run(self, source_id):
source = Source.objects.get(id=source_id)
for scraped_event in self.get_data():
event = get_object_or_none(
Event, source=source, name=scraped_event["name"])
if event is None:
category, _ = Category.objects.get_or_create_from_name(
scraped_event["category"])
Event.objects.create(
source=source,
category=category,
name=scraped_event["name"],
description=scraped_event["description"],
start_datetime=scraped_event["start_date"],
end_datetime=scraped_event.get("end_date"),
link=scraped_event["link"],
tags=[])
#mark this run
source.last_run_date = datetime.now(timezone.utc)
source.save()
| mit | Python |
|
632fea66f57f72d176fb8ad56f0cdaf5e4884110 | add test for multi-arch disasm | williballenthin/python-idb | tests/test_multiarch_disasm.py | tests/test_multiarch_disasm.py | import os.path
import idb
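# The fixtures are IDA databases (.idb/.i64) built from an 'ls' binary per
# architecture; each test spot-checks a few known instructions.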
def test_armel_disasm():
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'armel', 'ls.idb')
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
assert api.idc.GetDisasm(0x00002598) == 'push\t{r4, r5, r6, r7, r8, sb, sl, fp, lr}'
assert api.idc.GetDisasm(0x00012010) == 'b\t#0x12014'
def test_thumb_disasm():
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'thumb', 'ls.idb')
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
assert api.idc.GetDisasm(0x00011eac) == 'strb\tr4, [r3, r5]'
assert api.idc.GetDisasm(0x00011eae) == 'b\t#0x11ebc'
def test_arm64_disasm():
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'arm64', 'ls.i64')
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
assert api.idc.GetDisasm(0x00005d30) == 'cmp\tw5, #0x74'
assert api.idc.GetDisasm(0x00005d34) == 'csel\tw5, w5, w12, ne'
assert api.idc.GetDisasm(0x00005d38) == 'b\t#0x5c30'
def test_mips_disasm():
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'mips', 'ls.idb')
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
assert api.idc.GetDisasm(0x00005440) == 'sb\t$t2, ($t1)'
assert api.idc.GetDisasm(0x00005444) == 'addiu\t$t3, $t3, 1'
assert api.idc.GetDisasm(0x00005448) == 'b\t0x523c'
def test_mipsel_disasm():
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'mipsel', 'ls.idb')
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
assert api.idc.GetDisasm(0x0000543c) == 'sb\t$t2, ($t1)'
assert api.idc.GetDisasm(0x00005440) == 'addiu\t$t3, $t3, 1'
assert api.idc.GetDisasm(0x00005444) == 'b\t0x5238'
def test_mips64el_disasm():
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'mips64el', 'ls.i64')
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
assert api.idc.GetDisasm(0x0000b8c8) == 'addiu\t$s0, $s0, -0x57'
assert api.idc.GetDisasm(0x0000b8cc) == 'daddiu\t$v1, $v1, 1'
assert api.idc.GetDisasm(0x0000b8d0) == 'b\t0xb760'
| apache-2.0 | Python |
|
423554349177a5c8ed987f249b13fac9c8b8d79a | Add links to upgrade actions in the change log | ratoaq2/Flexget,crawln45/Flexget,patsissons/Flexget,camon/Flexget,qvazzler/Flexget,tobinjt/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,thalamus/Flexget,ZefQ/Flexget,jacobmetrick/Flexget,dsemi/Flexget,malkavi/Flexget,antivirtel/Flexget,ZefQ/Flexget,sean797/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,Flexget/Flexget,patsissons/Flexget,grrr2/Flexget,grrr2/Flexget,qk4l/Flexget,asm0dey/Flexget,vfrc2/Flexget,drwyrm/Flexget,qvazzler/Flexget,poulpito/Flexget,spencerjanssen/Flexget,asm0dey/Flexget,xfouloux/Flexget,asm0dey/Flexget,jacobmetrick/Flexget,qk4l/Flexget,JorisDeRieck/Flexget,lildadou/Flexget,sean797/Flexget,cvium/Flexget,tarzasai/Flexget,v17al/Flexget,crawln45/Flexget,camon/Flexget,malkavi/Flexget,sean797/Flexget,tarzasai/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,ratoaq2/Flexget,cvium/Flexget,JorisDeRieck/Flexget,tvcsantos/Flexget,drwyrm/Flexget,X-dark/Flexget,antivirtel/Flexget,Danfocus/Flexget,jacobmetrick/Flexget,xfouloux/Flexget,JorisDeRieck/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,X-dark/Flexget,ibrahimkarahan/Flexget,gazpachoking/Flexget,v17al/Flexget,OmgOhnoes/Flexget,voriux/Flexget,voriux/Flexget,ianstalk/Flexget,Pretagonist/Flexget,spencerjanssen/Flexget,Danfocus/Flexget,Danfocus/Flexget,thalamus/Flexget,spencerjanssen/Flexget,ratoaq2/Flexget,thalamus/Flexget,dsemi/Flexget,ibrahimkarahan/Flexget,cvium/Flexget,ibrahimkarahan/Flexget,Pretagonist/Flexget,offbyone/Flexget,qvazzler/Flexget,lildadou/Flexget,oxc/Flexget,offbyone/Flexget,tsnoam/Flexget,poulpito/Flexget,jawilson/Flexget,malkavi/Flexget,gazpachoking/Flexget,offbyone/Flexget,Pretagonist/Flexget,ZefQ/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,OmgOhnoes/Flexget,tarzasai/Flexget,oxc/Flexget,vfrc2/Flexget,Flexget/Flexget,poulpito/Flexget,vfrc2/Flexget,ianstalk/Flexget,jawilson/Flexget,patsissons/Flexget,lildadou/Flexget,qk4l/Flexget,tvcsantos/Flexget,Danfocus/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,X-dark/Flexget,Flexget/Flexget,Flexget/Flexget,crawln45/Flexget,oxc/Flexget,crawln45/Flexget,grrr2/Flexget,v17al/Flexget,tsnoam/Flexget,drwyrm/Flexget,tsnoam/Flexget,antivirtel/Flexget,tobinjt/Flexget,xfouloux/Flexget | gen-changelog.py | gen-changelog.py | # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
import dateutil.parser
import requests
from flexget.utils.soup import get_soup
out_path = 'ChangeLog'
if len(sys.argv) > 1:
dir_name = os.path.dirname(sys.argv[1])
if dir_name and not os.path.isdir(dir_name):
print 'Output dir doesn\'t exist: %s' % sys.argv[1]
sys.exit(1)
out_path = sys.argv[1]
ua_response = requests.get('http://flexget.com/wiki/UpgradeActions')
ua_soup = get_soup(ua_response.text)
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
'--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
for line in git_log_iter:
assert line == '---'
tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
date = dateutil.parser.parse(next(git_log_iter))
commit_hash = next(git_log_iter)
body = list(iter(git_log_iter.next, '---'))
if tag:
ver = tag.group(1)
ua_link = ''
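            # find this version's heading on the UpgradeActions wiki page and
            # link to its anchor (trac's ^...^ superscript syntax)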
result = ua_soup.find('h3', text=re.compile(re.escape(ver)))
if result:
ua_link = '^[wiki:UpgradeActions#%s upgrade actions]^ ' % result['id']
out_file.write('\n=== %s (%s) %s===\n\n' % (ver, date.strftime('%Y.%m.%d'), ua_link))
out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
import dateutil.parser
out_path = 'ChangeLog'
if len(sys.argv) > 1:
dir_name = os.path.dirname(sys.argv[1])
if dir_name and not os.path.isdir(dir_name):
print 'Output dir doesn\'t exist: %s' % sys.argv[1]
sys.exit(1)
out_path = sys.argv[1]
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
'--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
for line in git_log_iter:
assert line == '---'
tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
date = dateutil.parser.parse(next(git_log_iter))
commit_hash = next(git_log_iter)
body = list(iter(git_log_iter.next, '---'))
if tag:
out_file.write('\n=== %s (%s) ===\n\n' % (tag.group(1), date.strftime('%Y.%m.%d')))
out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| mit | Python |
35b1fc5e43f553e95ad4c8a42c37ca66639d9120 | add test for core.py | econ-ark/HARK,econ-ark/HARK | HARK/tests/test_core.py | HARK/tests/test_core.py | """
This file implements unit tests for interpolation methods
"""
from HARK.core import HARKobject
import numpy as np
import unittest
class testHARKobject(unittest.TestCase):
def setUp(self):
self.obj_a = HARKobject()
self.obj_b = HARKobject()
def test_distance(self):
        self.assertRaises(AttributeError, self.obj_a.distance, self.obj_b)
| apache-2.0 | Python |
|
be17fa5026fd7cd64ccfc6e7241137a3f864725b | add google doc generator | gnawhleinad/pad,gnawhleinad/pad | generate_gpad.py | generate_gpad.py | import httplib2
import webbrowser
from apiclient.discovery import build
from oauth2client import client
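# Installed-app OAuth2 flow; client_secret.json is downloaded from the Google API console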
flow = client.flow_from_clientsecrets(
'client_secret.json',
scope=['https://www.googleapis.com/auth/drive.file',
'https://www.googleapis.com/auth/urlshortener'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open(flow.step1_get_authorize_url())
auth_code = raw_input('Enter the auth code: ')
credentials = flow.step2_exchange(auth_code)
http = credentials.authorize(httplib2.Http())
service = build('drive', 'v2', http)
body = {
'mimeType': 'application/vnd.google-apps.document',
'title': 'hodor'
}
doc = service.files().insert(body=body).execute()
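# grant write access to anyone who has the link, then re-fetch the doc to read its share URL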
body = {
'role': 'writer',
'type': 'anyone',
'withLink': True
}
service.permissions().insert(fileId=doc['id'], body=body).execute()
doc = service.files().get(fileId=doc['id']).execute()
share = doc['alternateLink']
service = build('urlshortener', 'v1', http)
body = { 'longUrl': share }
short = service.url().insert(body=body).execute()
print short['id']
| unlicense | Python |
|
f9e6176bc43262882a0d50f4d850c04c3460b9d8 | Add SS :-) | m4rx9/rna-pdb-tools,m4rx9/rna-pdb-tools | rna_pdb_tools/SecondaryStructure.py | rna_pdb_tools/SecondaryStructure.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Seq and secondary structure prediction"""
import os
import tempfile
import shutil
VARNA_PATH = '/Users/magnus/skills/rnax/varna_tut/'
def draw_ss(title,seq, ss, img_out):
""""""
curr = os.getcwd()
    os.chdir(VARNA_PATH)  # VARNAv3-93-src
print os.getcwd()
t = tempfile.NamedTemporaryFile(delete=False)
t.name += '.png'
os.system('java -cp VARNA.jar fr.orsay.lri.varna.applications.VARNAcmd -sequenceDBN ' + seq + " -structureDBN '" + ss + "' -o " + t.name + " -title " + title + " -resolution '2.0'")
os.chdir(curr)
print img_out
shutil.move(t.name, img_out)
if __name__ == '__main__':
seq = 'AAAAAAA'
ss = '((...))'
img_out = 'out.png'
draw_ss('rna', seq, ss, img_out)
| mit | Python |
|
e5fed1895b69d824e3dc773dd6c6f88974e24f67 | discard module (#61452) | thaim/ansible,thaim/ansible | lib/ansible/modules/network/checkpoint/cp_mgmt_discard.py | lib/ansible/modules/network/checkpoint/cp_mgmt_discard.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_discard
short_description: All changes done by user are discarded and removed from database.
description:
- All changes done by user are discarded and removed from database.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
uid:
description:
- Session unique identifier. Specify it to discard a different session than the one you currently use.
type: str
extends_documentation_fragment: checkpoint_commands
"""
EXAMPLES = """
- name: discard
cp_mgmt_discard:
"""
RETURN = """
cp_mgmt_discard:
description: The checkpoint discard output.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command
def main():
argument_spec = dict(
uid=dict(type='str')
)
argument_spec.update(checkpoint_argument_spec_for_commands)
module = AnsibleModule(argument_spec=argument_spec)
command = "discard"
result = api_command(module, command)
module.exit_json(**result)
if __name__ == '__main__':
main()
| mit | Python |
|
1bd0669e67fc082cbd496b3aa54c6a6f6a0d5fce | Add grab.util.log::print_dict method for fuzzy displaying of dict objects in console | subeax/grab,maurobaraldi/grab,liorvh/grab,alihalabyah/grab,maurobaraldi/grab,lorien/grab,giserh/grab,huiyi1990/grab,subeax/grab,istinspring/grab,istinspring/grab,SpaceAppsXploration/grab,pombredanne/grab-1,lorien/grab,kevinlondon/grab,raybuhr/grab,DDShadoww/grab,subeax/grab,DDShadoww/grab,huiyi1990/grab,raybuhr/grab,pombredanne/grab-1,codevlabs/grab,kevinlondon/grab,liorvh/grab,codevlabs/grab,SpaceAppsXploration/grab,giserh/grab,alihalabyah/grab,shaunstanislaus/grab,shaunstanislaus/grab | grab/util/log.py | grab/util/log.py | def repr_value(val):
if isinstance(val, unicode):
return val.encode('utf-8')
elif isinstance(val, (list, tuple)):
        return '[%s]' % ', '.join(repr_value(x) for x in val)
elif isinstance(val, dict):
        return '{%s}' % ', '.join('%s: %s' % (repr_value(x), repr_value(y)) for x, y in val.items())
else:
return str(val)
def print_dict(dic):
print '[---'
for key, val in sorted(dic.items(), key=lambda x: x[0]):
print key, ':', repr_value(val)
print '---]'
| mit | Python |
|
e5bd12b67f58c1a099c2bd2dd66b043b43969267 | Add a tool to publish packages in the repo to pub. Review URL: https://codereview.chromium.org//11415191 | dart-archive/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk | tools/publish_pkg.py | tools/publish_pkg.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# Script to push a package to pub.
#
# Usage: publish_pkg.py pkg_dir
import os
import os.path
import re
import shutil
import sys
import subprocess
import tempfile
def ReplaceInFiles(paths, subs):
'''Reads a series of files, applies a series of substitutions to each, and
saves them back out. subs should be a list of (pattern, replace) tuples.'''
for path in paths:
contents = open(path).read()
for pattern, replace in subs:
contents = re.sub(pattern, replace, contents)
dest = open(path, 'w')
dest.write(contents)
dest.close()
def ReadVersion(file, field):
  for line in open(file).read().split('\n'):
    line = line.strip()
    if not line:  # skip blank lines, e.g. a trailing newline in VERSION
      continue
    [k, v] = re.split('\s+', line)
    if field == k:
      return int(v)
def Main(argv):
HOME = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
versionFile = os.path.join(HOME, 'tools', 'VERSION')
major = ReadVersion(versionFile, 'MAJOR')
minor = ReadVersion(versionFile, 'MINOR')
build = ReadVersion(versionFile, 'BUILD')
patch = ReadVersion(versionFile, 'PATCH')
if major == 0 and minor <= 1:
print 'Error: Do not run this script from a bleeding_edge checkout.'
return -1
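  # Dart SDK versions are MAJOR.MINOR.BUILD+PATCH (the '+' part is semver build metadata)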
version = '%d.%d.%d+%d' % (major, minor, build, patch)
tmpDir = tempfile.mkdtemp()
pkgName = argv[1].split('/').pop()
shutil.copytree(os.path.join(HOME, argv[1]),
os.path.join(tmpDir, pkgName))
# Add version to pubspec file.
pubspec = os.path.join(tmpDir, pkgName, 'pubspec.yaml')
pubspecFile = open(pubspec)
lines = pubspecFile.readlines()
pubspecFile.close()
pubspecFile = open(pubspec, 'w')
foundVersion = False
for line in lines:
if line.startswith('version:'):
foundVersion = True
if line.startswith('description:') and not foundVersion:
pubspecFile.write('version: ' + version + '\n')
if not line.startswith(' sdk:'):
pubspecFile.write(line)
pubspecFile.close()
# Replace '../*/pkg' imports and parts.
for root, dirs, files in os.walk(os.path.join(tmpDir, pkgName)):
for name in files:
if name.endswith('.dart'):
ReplaceInFiles([os.path.join(root, name)],
[(r'(import|part)(\s+)(\'|")(\.\./)+pkg/', r'\1\2\3package:')])
print 'publishing version ' + version + ' of ' + argv[1] + ' to pub\n'
print tmpDir
subprocess.call(['pub', 'publish'], cwd=os.path.join(tmpDir, pkgName))
shutil.rmtree(tmpDir)
if __name__ == '__main__':
sys.exit(Main(sys.argv))
| bsd-3-clause | Python |
|
f734cbd91ff8997b9f2aac6bbec2238f8b5f7511 | Create __init__.py | ETCBC/laf-fabric | graf/__init__.py | graf/__init__.py | unlicense | Python |
||
a1c2423c349757f4725ef1250b9de084a469683c | Fix indentation | alfredodeza/ceph-doctor | ceph_medic/checks/cluster.py | ceph_medic/checks/cluster.py | from ceph_medic import metadata
#
# Error checks
#
def check_osds_exist():
code = 'ECLS1'
msg = 'There are no OSDs available'
osd_count = len(metadata['osds'].keys())
if not osd_count:
return code, msg
def check_nearfull():
"""
Checks if the osd capacity is at nearfull
"""
code = 'ECLS2'
msg = 'Cluster is nearfull'
try:
osd_map = metadata['cluster']['status']['osdmap']['osdmap']
except KeyError:
return
if osd_map['nearfull']:
return code, msg
| from ceph_medic import metadata
#
# Error checks
#
def check_osds_exist():
code = 'ECLS1'
msg = 'There are no OSDs available'
osd_count = len(metadata['osds'].keys())
if not osd_count:
return code, msg
def check_nearfull():
"""
Checks if the osd capacity is at nearfull
"""
code = 'ECLS2'
msg = 'Cluster is nearfull'
try:
osd_map = metadata['cluster']['status']['osdmap']['osdmap']
except KeyError:
return
if osd_map['nearfull']:
return code, msg | mit | Python |
84990a4ef20c2e0f42133ed06ade5ce2d4e98ae3 | Save team member picture with extension. | cdriehuys/chmvh-website,cdriehuys/chmvh-website,cdriehuys/chmvh-website | chmvh_website/team/models.py | chmvh_website/team/models.py | import os
from django.db import models
def team_member_image_name(instance, filename):
_, ext = os.path.splitext(filename)
return 'team/{0}{1}'.format(instance.name, ext)
class TeamMember(models.Model):
bio = models.TextField(
verbose_name='biography')
name = models.CharField(
max_length=50,
unique=True,
verbose_name='name')
picture = models.ImageField(
blank=True,
null=True,
upload_to=team_member_image_name)
def __str__(self):
"""Return the team member's name"""
return self.name
| from django.db import models
def team_member_image_name(instance, filename):
return 'team/{0}'.format(instance.name)
class TeamMember(models.Model):
bio = models.TextField(
verbose_name='biography')
name = models.CharField(
max_length=50,
unique=True,
verbose_name='name')
picture = models.ImageField(
blank=True,
null=True,
upload_to=team_member_image_name)
def __str__(self):
"""Return the team member's name"""
return self.name
| mit | Python |
bf993439a7c53bcffe099a61138cf8c17c39f943 | Add Partner label factory | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0066_partnerlabel.py | accelerator/migrations/0066_partnerlabel.py | # Generated by Django 2.2.10 on 2021-08-24 13:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0065_organization_note'),
]
operations = [
migrations.CreateModel(
name='PartnerLabel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('label', models.CharField(max_length=255)),
('partners', models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PARTNER_MODEL)),
],
options={
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PARTNERLABEL_MODEL',
},
),
]
| mit | Python |
|
6f9dcee86d986f05e289b39f6b4700d5d302f551 | add tests for base models | clach04/json-rpc,lorehov/json-rpc | jsonrpc/tests/test_base.py | jsonrpc/tests/test_base.py | """ Test base JSON-RPC classes."""
import unittest
from ..base import JSONRPCBaseRequest, JSONRPCBaseResponse
class TestJSONRPCBaseRequest(unittest.TestCase):
""" Test JSONRPCBaseRequest functionality."""
def test_data(self):
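        # the data property only accepts dict values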
request = JSONRPCBaseRequest()
self.assertEqual(request.data, {})
with self.assertRaises(ValueError):
request.data = []
with self.assertRaises(ValueError):
request.data = None
class TestJSONRPCBaseResponse(unittest.TestCase):
""" Test JSONRPCBaseResponse functionality."""
def test_data(self):
response = JSONRPCBaseResponse(result="")
self.assertEqual(response.data, {})
with self.assertRaises(ValueError):
response.data = []
with self.assertRaises(ValueError):
response.data = None
| mit | Python |
|
216b96e7f36d8b72ccd3ddf6809f0cc5af14d15a | Add fat_ready.py | cataliniacob/misc,cataliniacob/misc | fat_ready.py | fat_ready.py | #!/usr/bin/env python3
'''Make all files in a directory suitable for copying to a FAT filesystem.
'''
from __future__ import print_function
import os
import os.path
import sys
from six import u
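# six.u() keeps these literals unicode under both Python 2 and Python 3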
if __name__ == u('__main__'):
if len(sys.argv) != 2:
print(u('Usage: {} <directory to make FAT ready>').format(sys.argv[0]),
file=sys.stderr)
sys.exit(1)
fat_ready_dir = sys.argv[1]
for root, dirs, files in os.walk(fat_ready_dir):
for name in files:
if u(':') in name:
new_name = name.replace(u(':'), u(' '))
full_path_old = os.path.join(root, name)
full_path_new = os.path.join(root, new_name)
print(u('Renaming {} to {}').format(full_path_old, full_path_new))
os.rename(full_path_old, full_path_new)
| mit | Python |
|
40070b6bab49fa0bd46c1040d92bc476e557b19b | add algorithms.fractionation to assess gene loss, bites, etc. | sgordon007/jcvi_062915,tanghaibao/jcvi | algorithms/fractionation.py | algorithms/fractionation.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Catalog gene losses, and bites within genes.
"""
import sys
from optparse import OptionParser
from itertools import groupby
from jcvi.formats.blast import Blast
from jcvi.utils.range import range_minmax, range_overlap
from jcvi.utils.cbook import gene_name
from jcvi.algorithms.synteny import add_beds, check_beds
from jcvi.apps.base import ActionDispatcher, debug
debug()
def main():
actions = (
('loss', 'extract likely gene loss candidates'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def region_str(region):
return "{0}:{1}-{2}".format(*region)
def loss(args):
"""
%prog loss a.b.i1.blocks a.b-genomic.blast
Extract likely gene loss candidates between genome a and b.
"""
p = OptionParser(loss.__doc__)
p.add_option("--gdist", default=20,
help="Gene distance [default: %default]")
p.add_option("--bdist", default=20000,
help="Base pair distance [default: %default]")
add_beds(p)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
blocksfile, genomicblast = args
gdist, bdist = opts.gdist, opts.bdist
qbed, sbed, qorder, sorder, is_self = check_beds(blocksfile, p, opts)
blocks = []
fp = open(blocksfile)
genetrack = {}
proxytrack = {}
for row in fp:
a, b = row.split()
genetrack[a] = b
blocks.append((a, b))
data = []
for key, rows in groupby(blocks, key=lambda x: x[-1]):
rows = list(rows)
data.append((key, rows))
imax = len(data) - 1
for i, (key, rows) in enumerate(data):
if i == 0 or i == imax:
continue
if key != '.':
continue
before, br = data[i - 1]
after, ar = data[i + 1]
bi, bx = sorder[before]
ai, ax = sorder[after]
dist = abs(bi - ai)
if bx.seqid != ax.seqid or dist > gdist:
continue
start, end = range_minmax(((bx.start, bx.end), (ax.start, ax.end)))
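        # the proxy is the span between the two flanking syntenic genes,
        # padded by --bdist bp on each side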
proxy = (bx.seqid, start - bdist, end + bdist)
for a, b in rows:
proxytrack[a] = proxy
blast = Blast(genomicblast)
tags = {}
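    # tag legend: S = a genomic hit falls inside the proxy region,
    # NS = hits exist but none are syntenic, NF = no genomic hit at all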
for query, bb in blast.iter_hits():
query = gene_name(query)
if query not in proxytrack:
continue
proxy = proxytrack[query]
tag = "NS"
for b in bb:
hsp = (b.subject, b.sstart, b.sstop)
if range_overlap(proxy, hsp):
proxytrack[query] = hsp
tag = "S"
break
tags[query] = tag
for b in qbed:
accn = b.accn
target_region = genetrack[accn]
if accn in proxytrack:
target_region = region_str(proxytrack[accn])
if accn in tags:
target_region += "[{0}]".format(tags[accn])
else:
target_region += "[NF]"
print "\t".join((accn, target_region))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
257134bdaea7c250d5956c4095adf0b917b65aa6 | Fix null case for event details | verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,nwalters512/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,phil-lopreiato/the-blue-alliance,nwalters512/the-blue-alliance,tsteward/the-blue-alliance,jaredhasenklein/the-blue-alliance,phil-lopreiato/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,the-blue-alliance/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,the-blue-alliance/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,fangeugene/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,tsteward/the-blue-alliance,phil-lopreiato/the-blue-alliance,fangeugene/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,the-blue-alliance/the-blue-alliance,phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,fangeugene/the-blue-alliance,bdaroz/the-blue-alliance | database/dict_converters/event_details_converter.py | database/dict_converters/event_details_converter.py | from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
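        # event_details can be None for events whose details have not been computed yet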
event_details_dict = {
'alliances': event_details.alliance_selections if event_details else None,
'district_points': event_details.district_points if event_details else None,
'rankings': event_details.renderable_rankings if event_details else None,
'stats': event_details.matchstats if event_details else None,
}
return event_details_dict
| from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections,
'district_points': event_details.district_points,
'rankings': event_details.renderable_rankings,
'stats': event_details.matchstats,
}
return event_details_dict
| mit | Python |
1a296a5203c422a7eecc0be71a91994798f01c10 | copy name->title for BehaviorAction and BehaviorSequences | izzyalonso/tndata_backend,izzyalonso/tndata_backend,tndatacommons/tndata_backend,tndatacommons/tndata_backend,izzyalonso/tndata_backend,izzyalonso/tndata_backend,tndatacommons/tndata_backend,tndatacommons/tndata_backend | tndata_backend/goals/migrations/0020_populate_basebehavior_title_slugs.py | tndata_backend/goals/migrations/0020_populate_basebehavior_title_slugs.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.utils.text import slugify
def _copy_name_to_title(model, apps):
"""Copy the values from the Model's name -> title and name_slug -> title_slug."""
M = apps.get_model("goals", model)
for obj in M.objects.all():
obj.title = obj.name
obj.title_slug = obj.name_slug or slugify(obj.name)
obj.save()
def _copy_title_to_name(model, apps):
"""Copy the values from the Model's title -> name and title_slug -> name_slug."""
M = apps.get_model("goals", model)
for obj in M.objects.all():
obj.name = obj.title
obj.name_slug = obj.title_slug or slugify(obj.title)
obj.save()
def copy_behavior_title(apps, schema_editor):
_copy_name_to_title("BehaviorSequence", apps)
def copy_action_title(apps, schema_editor):
_copy_name_to_title("BehaviorAction", apps)
def rev_copy_behavior_title(apps, schema_editor):
_copy_title_to_name("BehaviorSequence", apps)
def rev_copy_action_title(apps, schema_editor):
_copy_title_to_name("BehaviorAction", apps)
class Migration(migrations.Migration):
dependencies = [
('goals', '0019_auto_20150312_1553'),
]
operations = [
migrations.RunPython(copy_behavior_title, reverse_code=rev_copy_behavior_title),
migrations.RunPython(copy_action_title, reverse_code=rev_copy_action_title),
]
| mit | Python |
|
94cfc0a7598dd8dcf455311f8bb41c2016c7c3a8 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/warmup/easy/plus_minus/py/solution.py | hackerrank/algorithms/warmup/easy/plus_minus/py/solution.py | #include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
int main(void)
{
int n;
scanf("%d",&n);
int arr[n];
for(int arr_i = 0; arr_i < n; arr_i++){
scanf("%d",&arr[arr_i]);
}
const double UNIT_RATIO = 1.0 / n;
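    // each of the n elements contributes exactly 1/n to one of the three ratios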
double posratio = 0.0;
double negratio = 0.0;
double zratio = 0.0;
int value = 0;
for (int i = 0; i < n; ++i) {
value = arr[i];
if (value > 0) {
posratio += UNIT_RATIO;
} else if (value < 0) {
negratio += UNIT_RATIO;
} else {
zratio += UNIT_RATIO;
}
}
printf("%lf\n", posratio);
printf("%lf\n", negratio);
printf("%lf\n", zratio);
return 0;
}
| mit | Python |
|
a27d30c4514cef93e054d5597829dc758b04c95e | add xycut in util | Transkribus/TranskribusDU,Transkribus/TranskribusDU,Transkribus/TranskribusDU | TranskribusDU/util/XYcut.py | TranskribusDU/util/XYcut.py | # -*- coding: utf-8 -*-
"""
XYcut.py
vertical/ horizontal cuts for page elements:
copyright Naver Labs Europe 2018
READ project
"""
def mergeSegments(lSegment, iMin):
"""Take as input a list of interval on some axis,
together with the object that contributed to this interval.
In this module it's a textbox or an image
Merge overlapping segments
Return a sorted list of disjoints segments together
with the associated objects (that is the union of the objects
associated to the segments being merged)
Contiguous segments closer than iMin are merged as well.
INPUT: [ (a,b,o) , ...]
or INPUT: [ (a,b, [o,...]) , ...]
        OUTPUT: [ (c,d,[o,...]) , ...], min, max
        bProjOn may contain the name of the axis on which the projection has
        been done ("X" for an x-cut, "Y" for a y-cut);
        then, in frontier mode, we keep small intervals if they coincide
        with a frontier (e.g. a very narrow horizontal split coinciding with
        a line is kept even though it is narrower than iMin).
        p and q are the boundaries along the other axis of the block to cut.
"""
lMergedSegment = []
for seg in lSegment:
(aaux,baux,o) = seg
lo = (o,)
a = min(aaux,baux) #just in case...
b = max(aaux,baux) #just in case...
#find all overlapping or close-enough segments and merge them
lOverlap = []
for mseg in lMergedSegment:
[aa,bb,loaux] = mseg
iOver = max(a,aa) - min(b, bb) #negative means overlap
if iOver <= iMin: #overlap or spaced by less than iMin pixel
lOverlap.append(mseg)
else:
pass #nothing to merge with
if lOverlap:
#merge the current segment with all overlapping msegments
for aa, bb, lolo in lOverlap:
if aa<a: a=aa
if bb>b: b=bb
lo = lo + tuple(lolo)
for mseg in lOverlap:
lMergedSegment.remove(mseg)
#mseg = [a, b, lo]
mseg = (a, b, tuple(lo))
lMergedSegment.append(mseg)
#sorted list
lMergedSegment.sort()
amin = lMergedSegment[0][0]
amax = lMergedSegment[-1][1]
return tuple(lMergedSegment), amin, amax
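# A small worked example (values traced by hand through the logic above):
#   mergeSegments([(0, 10, 'a'), (8, 20, 'b'), (40, 50, 'c')], 5)
#   -> (((0, 20, ('b', 'a')), (40, 50, ('c',))), 0, 50)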
| bsd-3-clause | Python |
|
e5e24ddccf5de2fba743a97c1790406259399d18 | Create one fixture for all tests | ujilia/python_training2,ujilia/python_training2,ujilia/python_training2 | conftest.py | conftest.py | import pytest
from fixture.application import Application
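# one Application instance is shared by the whole test session and destroyed at teardown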
@pytest.fixture(scope = "session")
def app(request):
fixture = Application()
request.addfinalizer(fixture.destroy)
return fixture
| apache-2.0 | Python |
|
74d274f02fa23f1a6799e9f96ccb1ef77162f1bc | Add new package: consul (#18044) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/consul/package.py | var/spack/repos/builtin/packages/consul/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Consul(MakefilePackage):
"""Consul is a distributed, highly available,
and data center aware solution to connect and configure applications
across dynamic, distributed infrastructure."""
homepage = "https://www.consul.io"
url = "https://github.com/hashicorp/consul/archive/v1.8.1.tar.gz"
version('1.8.1', sha256='c173e9866e6181b3679a942233adade118976414f6ca2da8deaea0fa2bba9b06')
version('1.8.0', sha256='a87925bde6aecddf532dfd050e907b6a0a6447cdd5dc4f49b46d97c9f73b58f9')
version('1.7.6', sha256='893abad7563c1f085303705f72d8789b338236972123f0ab6d2be24dbb58c2ac')
depends_on('go@1.14:')
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('lib', prefix.lib)
| lgpl-2.1 | Python |
|
6427406fc627b467dd4851f32b6a15a74356ef2d | Create new package. (#6043) | LLNL/spack,EmreAtes/spack,EmreAtes/spack,skosukhin/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,lgarren/spack,lgarren/spack,matthiasdiener/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,lgarren/spack,iulian787/spack,EmreAtes/spack,mfherbst/spack,mfherbst/spack,iulian787/spack,mfherbst/spack,tmerrick1/spack,EmreAtes/spack,skosukhin/spack,matthiasdiener/spack,LLNL/spack,krafczyk/spack,matthiasdiener/spack,krafczyk/spack,lgarren/spack,mfherbst/spack,krafczyk/spack,krafczyk/spack,skosukhin/spack,tmerrick1/spack,skosukhin/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,iulian787/spack,skosukhin/spack,LLNL/spack,EmreAtes/spack,tmerrick1/spack,krafczyk/spack,lgarren/spack | var/spack/repos/builtin/packages/r-gviz/package.py | var/spack/repos/builtin/packages/r-gviz/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGviz(RPackage):
"""Genomic data analyses requires integrated visualization
of known genomic information and new experimental data. Gviz
uses the biomaRt and the rtracklayer packages to perform live
annotation queries to Ensembl and UCSC and translates this to
e.g. gene/transcript structures in viewports of the grid
graphics package. This results in genomic information plotted
together with your data."""
homepage = "http://bioconductor.org/packages/Gviz/"
url = "https://git.bioconductor.org/packages/Gviz"
version('1.20.0', git='https://git.bioconductor.org/packages/Gviz', commit='299b8255e1b03932cebe287c3690d58c88f5ba5c')
depends_on('r@3.4.0:3.4.9', when='@1.20.0')
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('r-rtracklayer', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-biomart', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-bsgenome', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-biovizbase', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-latticeextra', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-digest', type=('build', 'run'))
| lgpl-2.1 | Python |
|
3603669e0359f612b8e68a24b035849e9694aaaf | Add win_system state module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/states/win_system.py | salt/states/win_system.py | # -*- coding: utf-8 -*-
'''
Management of Windows system information
========================================
This state is used to manage system information such as the computer name and
description.
.. code-block:: yaml
ERIK-WORKSTATION:
system:
- computer_name
This is Erik's computer, don't touch!:
system:
- computer_desc
'''
# Import python libs
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
This only supports Windows
'''
if salt.utils.is_windows() and 'system.get_computer_desc' in __salt__:
return 'system'
return False
def computer_desc(name):
'''
Manage the computer's description field
name
The desired computer description
'''
# Just in case someone decides to enter a numeric description
name = str(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Computer description already set to {0!r}'.format(name)}
before_desc = __salt__['system.get_computer_desc']()
if before_desc == name:
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Computer description will be changed to {0!r}'
.format(name))
return ret
if not __salt__['system.set_computer_desc'](name):
ret['result'] = False
ret['comment'] = ('Unable to set computer description to '
'{0!r}'.format(name))
else:
ret['comment'] = ('Computer description successfully changed to {0!r}'
.format(name))
ret['changes'] = {'old': before_desc, 'new': name}
return ret
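# convenience alias so SLS files can reference either name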
computer_description = computer_desc
def computer_name(name):
'''
Manage the computer's name
name
The desired computer name
'''
# Just in case someone decides to enter a numeric description
name = str(name).upper()
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Computer name already set to {0!r}'.format(name)}
before_name = __salt__['system.get_computer_name']()
pending_name = __salt__['system.get_pending_computer_name']()
if before_name == name and pending_name is None:
return ret
elif pending_name == name:
ret['comment'] = ('The current computer name is {0!r}, but will be '
'changed to {1!r} on the next reboot'
.format(before_name, name))
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Computer name will be changed to {0!r}'.format(name)
return ret
result = __salt__['system.set_computer_name'](name)
if result is not False:
after_name = result['Computer Name']['Current']
after_pending = result['Computer Name'].get('Pending')
if ((after_pending is not None and after_pending == name) or
(after_pending is None and after_name == name)):
ret['comment'] = 'Computer name successfully set to {0!r}'.format(name)
if after_pending is not None:
ret['comment'] += ' (reboot required for change to take effect)'
ret['changes'] = {'old': before_name, 'new': name}
else:
ret['result'] = False
ret['comment'] = 'Unable to set computer name to {0!r}'.format(name)
return ret
| apache-2.0 | Python |
|
0f68667e2ddfee6a370afe5c816a1358cfba799e | Correct GitHub URL. | openfisca/openfisca-matplotlib,openfisca/openfisca-qt,adrienpacifico/openfisca-matplotlib,openfisca/openfisca-matplotlib,openfisca/openfisca-qt | openfisca_qt/widgets/__init__.py | openfisca_qt/widgets/__init__.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The widgets variables are created by each country-specific package (cf function init_country())
# Note: The variables below are not inited (to None) here, to ensure that execution will fail when they are used before
# OpenFisca country-specific package is properly inited.
__all__ = [
'CompositionWidget',
]
| # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The widgets variables are created by each country-specific package (cf function init_country())
# Note: The variables below are not inited (to None) here, to ensure that execution will fail when they are used before
# OpenFisca country-specific package is properly inited.
__all__ = [
'CompositionWidget',
]
| agpl-3.0 | Python |
b02b3e2e385bc04b2f1b1160371d55f8b6122006 | add migration file | nanchenchen/script-analysis,nanchenchen/script-analysis,nanchenchen/script-analysis,nanchenchen/script-analysis,nanchenchen/script-analysis | pyanalysis/apps/corpus/migrations/0001_initial.py | pyanalysis/apps/corpus/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=150)),
('description', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Line',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.IntegerField(default=0)),
('text', models.TextField(default=b'', null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Script',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('last_modified', models.DateTimeField(default=django.utils.timezone.now)),
('dataset', models.ForeignKey(related_name='scripts', to='corpus.Dataset')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Token',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('st_col', models.IntegerField(default=0)),
('ed_col', models.IntegerField(default=0)),
('type', models.CharField(default=b'', max_length=32, null=True, blank=True)),
('text', models.TextField(default=b'', null=True, blank=True)),
('line', models.ForeignKey(related_name='tokens', to='corpus.Line')),
('script', models.ForeignKey(related_name='tokens', to='corpus.Script')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterIndexTogether(
name='token',
index_together=set([('script', 'type')]),
),
migrations.AlterIndexTogether(
name='script',
index_together=set([('dataset', 'last_modified'), ('dataset', 'name')]),
),
migrations.AddField(
model_name='line',
name='script',
field=models.ForeignKey(related_name='lines', to='corpus.Script'),
preserve_default=True,
),
migrations.AlterIndexTogether(
name='line',
index_together=set([('script', 'number')]),
),
]
| mit | Python |
|
980594ab26887a4628620e9e0e00d89ddbdc4e49 | Create hackstring.py | gvaduha/homebrew,gvaduha/homebrew,gvaduha/homebrew,gvaduha/homebrew,gvaduha/homebrew,gvaduha/homebrew | hackstring.py | hackstring.py | #! /usr/bin/env python
import sys
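# show argv[1] both URL percent-encoded and as \uXXXX escapes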
print "".join(["%%%02x" % ord(x) for x in sys.argv[1]])
print "".join(["\\u%04x" % ord(x) for x in sys.argv[1]])
| mit | Python |
|
739af3ccb50df93b108185ac1e7c0b47cd0bbf31 | Add happycopy2.py. | dw/scratch,dw/scratch,shekkbuilder/scratch,dw/scratch,shekkbuilder/scratch,shekkbuilder/scratch | happycopy2.py | happycopy2.py | #!/usr/bin/env python
#
# Like happycopy.py, but make efforts to fill the buffer when encountering many
# small files (e.g. OS X .sparsebundle)
#
# Picture the scene: converting a drive from HFS+ to NTFS so your TV can play
# movies from it directly.
#
# Problem: copying media files from partition at the end of the drive to the
# new partition at the start of the drive.
#
# Attempt #1: Finder / rsync: 10.4mb/sec, disk rattles like crazy.
# Investigating, since this is a removable disk, write caching is minimal.
# Result: read bandwidth is artificially starved because writes are being
# forced to disk sooner than necessary. Result: huge amount of time wasted on
# disk seeks.
#
# Attempt #2: happycopy.py!@#"!one. Beat rsync at its own game by a clean
# 4mb/sec, with 10% lower CPU utilization. Read 1gb at a time, then write that
# buffer out, rinse repeat. Result: nice fast sequential reads.
#
# Attempt 1 IO graphs:
# Read: /-?_\-/_|?-\-/-|_?_|/-\/--\/
# Write: /-?_\-/_|?-\-/-|_?_|/-\/--\/
#
# Attempt 2 IO graphs:
# Read: /------------\_____________/--------------\_______
# Write: _____________/-------------\______________/-------
#
# Result: happy :)
#
import os
import sys
import time
MAX_BUF = 1048576 * 1024 * 1
def die(msg):
print msg
raise SystemExit(1)
def target_path(src_dir, dst_dir, path):
rel = os.path.relpath(path, src_dir)
return os.path.join(dst_dir, rel)
def stats(s, size, dur):
print >> sys.stderr, s, '%.2fMb/sec' % ((float(size) / dur) / 1048576)
def read_phase(to_copy):
buffered = 0
buffers = []
while to_copy and buffered < MAX_BUF:
src, dst, start = to_copy.pop()
with open(src, 'rb') as fp:
fp.seek(start)
buf = fp.read(MAX_BUF - buffered)
if buf:
buffered += len(buf)
buffers.append((src, dst, buf))
to_copy.append((src, dst, start + len(buf)))
return buffered, buffers
def write_phase(buffers):
for src_path, dst_path, buf in buffers:
with file(dst_path, 'ab') as fp:
fp.write(buf)
print 'write', dst_path, len(buf)
def do_copy(to_copy):
start_ts = time.time()
read = 0
read_secs = 0
written = 0
write_secs = 0
while to_copy:
t0 = time.time()
buffered, buffers = read_phase(to_copy)
read_secs += time.time() - t0
read += buffered
stats('Read', read, read_secs)
t0 = time.time()
write_phase(buffers)
write_secs += time.time() - t0
written += buffered
stats('Write', written, write_secs)
stats('Throughput', written, time.time() - start_ts)
def main():
if len(sys.argv) != 3:
die('Usage: prog src_dir dst_dir')
src_dir, dst_dir = sys.argv[1:]
if not os.path.isdir(src_dir):
die('src dir must be dir')
to_copy = []
for dirpath, dirnames, filenames in os.walk(src_dir):
tgt = target_path(src_dir, dst_dir, dirpath)
if not os.path.exists(tgt):
os.makedirs(tgt)
elif not os.path.isdir(tgt):
print 'gah!', tgt
for filename in filenames:
src_path = os.path.join(dirpath, filename)
dst_path = target_path(src_dir, dst_dir, src_path)
if os.path.exists(dst_path) and \
os.path.getsize(src_path) == os.path.getsize(dst_path):
print 'skip', src_path
else:
to_copy.append((src_path, dst_path, 0))
print 'going to copy', len(to_copy), 'files'
to_copy.reverse()
do_copy(to_copy)
if __name__ == '__main__':
main()
| mit | Python |
|
084dd7fa3836f63d322a5bbf9e0289aa49488abb | Add fastagz field to data objects of process upload-fasta-nucl | genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio | resolwe_bio/migrations/0011_nucletide_seq.py | resolwe_bio/migrations/0011_nucletide_seq.py | import gzip
import os
import shutil
from django.conf import settings
from django.db import migrations
from resolwe.flow.migration_ops import DataDefaultOperation, ResolweProcessAddField, ResolweProcessRenameField
from resolwe.flow.utils import iterate_fields
FASTA_SCHEMA = {
'name': 'fasta',
'label': 'FASTA file',
'type': 'basic:file:',
}
class DefaultUnzipFasta(DataDefaultOperation):
"""Set default value."""
def prepare(self, data, from_state):
pass
def get_default_for(self, data, from_state):
"""Return default for given data object."""
fastagz = os.path.join(
settings.FLOW_EXECUTOR['DATA_DIR'],
data.location.subpath,
data.output['fastagz']['file']
)
assert fastagz.endswith('.gz')
fasta = fastagz[:-3]
# Decompress.
with gzip.open(fastagz, 'rb') as infile, open(fasta, 'wb') as outfile:
shutil.copyfileobj(infile, outfile)
size = os.path.getsize(fasta)
return {
'file': os.path.basename(fasta),
'size': size,
'total_size': size,
}
def recompute_data_size(apps, schema_editor):
"""Recompute size of all data objects of process ``upload-fasta-nucl``."""
Data = apps.get_model("flow", "Data") # pylint: disable=invalid-name
for data in Data.objects.filter(process__slug='upload-fasta-nucl'):
hydrate_size(data)
data.save()
def hydrate_size(data):
"""Compute size of all Data object outputs and its cumultative size.
This is a simplified version of original ``hydrate_size`` function,
since we need just a subset of it.
"""
def add_file_size(obj):
"""Add file size to the basic:file field."""
path = os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], data.location.subpath, obj['file'])
obj['size'] = os.path.getsize(path)
obj['total_size'] = obj['size']
data_size = 0
for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'].startswith('basic:file:'):
add_file_size(value)
data_size += value.get('total_size', 0)
data.size = data_size
class Migration(migrations.Migration):
"""
Make outputs of ``upload-fasta-nucl`` consistent with ``upload-genome``.
Process ``upload-genome`` stores compressed output in ``fastagz``
and uncompressed in ``fasta``. Process ``upload-fasta-nucl`` stores
compressed output in ``fasta`` output field and does not have a
field with uncompressed output. Therefore ``fasta`` field is first
renamed to ``fastagz``. Only then ``fasta`` field is added with
decompressed content.
"""
dependencies = [
('resolwe_bio', '0010_add_relation_types'),
('flow', '0028_add_data_location'),
]
operations = [
ResolweProcessRenameField(
process='upload-fasta-nucl',
field='output.fasta',
new_field='fastagz',
),
ResolweProcessAddField(
process='upload-fasta-nucl',
field='output.fasta',
schema=FASTA_SCHEMA,
default=DefaultUnzipFasta(),
),
migrations.RunPython(recompute_data_size),
]
| apache-2.0 | Python |
|
2df34105a58a05fd1f50f88bc967360b4bd9afc8 | Create LongestIncreasingSubseq_001.py | Chasego/codirit,cc13ny/algo,cc13ny/algo,Chasego/codi,Chasego/codi,Chasego/codirit,Chasego/cod,cc13ny/Allin,Chasego/codi,Chasego/codirit,Chasego/cod,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/codi,Chasego/cod,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,Chasego/codirit,Chasego/cod,cc13ny/algo | leetcode/300-Longest-Increasing-Subsequence/LongestIncreasingSubseq_001.py | leetcode/300-Longest-Increasing-Subsequence/LongestIncreasingSubseq_001.py | class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n == 0:
return 0
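        # maxlen[i]: length of the longest increasing subsequence ending
        # at nums[i]; classic O(n^2) dynamic programming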
maxlen = [1 for i in range(n)]
for i in range(1, n):
for j in range(i):
if nums[j] < nums[i] and maxlen[j] + 1 > maxlen[i]:
maxlen[i] = maxlen[j] + 1
return max(maxlen)
| mit | Python |
|
d37f57bc2b6816759a6e7108cef4a03322a622ce | Create generator.py | imreeciowy/wfrp-gen | generator.py | generator.py | #!/usr/bin/python3
import random
import time
import sys
import Being
# print python version- dev purposes
print(sys.version)
# generic dice
def x_dices_n(x,n):
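    # sum the results of rolling x dice with n sides each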
result = 0
for i in range(0, x):
roll_dice_n = random.randint(1,n)
result = roll_dice_n + result
return result
# crude race selector ;)
player = Being.Human()
# roll for stats with generic dice
fresh_stats=[]
for x in range(0, 8):
fresh_stats.append(x_dices_n(2, 10))
# sorts rolled results, removes lowest result, adds 11 as Shalya'a Favor, sorts again
fresh_stats.sort()
fresh_stats.pop(0)
fresh_stats.append(11)
fresh_stats.sort(reverse=True)
# join the list into a printable string without brackets (stat_listed_String)
stat_listed_String = ' '.join(str(S) for S in fresh_stats)
print('rolled')
print(stat_listed_String)
# raw list for chosen stats
chosen_stats = [0] * 8
# empty list for roll enumeration - to avoid doubled attribution
used_stats=[]
# tuple with stat names
stat_first_names = ('WS', 'BS', 'S', 'T', 'Ag', 'Int', 'WP', 'Fel')
# tuple with second line stat names
stat_second_names = ('A', 'W', 'SB', 'TB', 'M', 'Mag', 'IP', 'FP')
# stats assignment
# ask the player which stat each rolled value should go to
for idx, val in enumerate(fresh_stats):
    print('Which stat do you want the value ' + str(val) + ' assigned to?')
for Ind, Vart in enumerate(stat_first_names):
if (used_stats.count(Ind))==1:
print('*',end='')
print(Vart,end='\t')
print('\n')
for i in range(8):
print(i,end='\t')
print('\n')
while True:
try:
index = int(input('? ')) # input stat index
            if used_stats.count(index) != 0: # check if not assigned already
                raise ValueError('stat already assigned') # retry below; StatPresentError was never defined
chosen_stats[index]=val # assign value to index
used_stats.append(index) # notes what is assigned
except KeyboardInterrupt:
print('BYE!')
sys.exit(0)
except:
            print('Please choose again: which stat should get the value ' + str(val) + '?')
continue
else:
break
for w in range(0, 60):
print("*", end='')
print('\n')
print(*stat_first_names, sep='\t')
print(*chosen_stats, sep='\t')
# test purposes
# print(*used_stats, sep='\t')
# print(*fresh_stats, sep='\t')
# increment race base with chosen stats
print('Your character body')
player.body1 = [sum(x) for x in zip(player.base_line_1, chosen_stats)]
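# Strength Bonus and Toughness Bonus are the tens digit of S and T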
player.base_line_2[2] = player.body1[2] // 10
player.base_line_2[3] = player.body1[3] // 10
print(*stat_first_names, sep='\t')
print(*player.body1, sep='\t')
print(*stat_second_names, sep='\t')
print(*player.base_line_2, sep='\t')
# save to file
time_string = time.strftime("%Y-%m-%d--%H%M%S")
filename = ('statistics-' + time_string + '.txt')
f = open(filename, 'w')
for S in fresh_stats:
f.write(str(S))
f.write('\t')
f.write('\n'+str(sum(fresh_stats)))
f.write('\n')
for i in range(8):
f.write(str(stat_first_names[i]))
f.write('\t')
f.write('\n')
for D in chosen_stats:
f.write(str(D))
f.write('\t')
f.write('\n')
for A in player.body1:
f.write(str(A))
f.write('\t')
f.close()
| mit | Python |
|
c2d26a5942cb22f4510abd6d5ff8c83d6a386810 | make migrations and model updates | linea-it/masterlist,linea-it/masterlist,linea-it/masterlist | masterlist/candidates/migrations/0005_auto_20160725_1759.py | masterlist/candidates/migrations/0005_auto_20160725_1759.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0004_auto_20160708_1422'),
]
operations = [
migrations.RemoveField(
model_name='candidate',
name='mask_blue',
),
migrations.RemoveField(
model_name='candidate',
name='mask_red',
),
migrations.RemoveField(
model_name='candidate',
name='sb_max',
),
migrations.RemoveField(
model_name='candidate',
name='sb_min',
),
migrations.AddField(
model_name='candidate',
name='data_season',
field=models.CharField(max_length=64, null=True, verbose_name=b'DES data season'),
),
migrations.AddField(
model_name='candidate',
name='dec_field',
field=models.FloatField(null=True, verbose_name=b'Dec Field'),
),
migrations.AddField(
model_name='candidate',
name='followup_date',
field=models.CharField(max_length=64, null=True, verbose_name=b'followup date'),
),
migrations.AddField(
model_name='candidate',
name='followup_facility',
field=models.CharField(max_length=64, null=True, verbose_name=b'followup facility'),
),
migrations.AddField(
model_name='candidate',
name='followup_success',
field=models.CharField(max_length=64, null=True, verbose_name=b'followup success'),
),
migrations.AddField(
model_name='candidate',
name='lens_class',
field=models.CharField(max_length=64, null=True, verbose_name=b'Lens (Y/N)'),
),
migrations.AddField(
model_name='candidate',
name='ra_field',
field=models.FloatField(null=True, verbose_name=b'RA Field'),
),
migrations.AddField(
model_name='candidate',
name='system_type',
field=models.CharField(max_length=64, null=True, verbose_name=b'Type of Candidate (gal or qso)'),
),
migrations.AddField(
model_name='candidate',
name='z_phot_lens',
field=models.FloatField(null=True, verbose_name=b'Z photo lens'),
),
migrations.AddField(
model_name='candidate',
name='z_spec_src',
            field=models.FloatField(null=True, verbose_name=b'Z spec source'),
),
]
| mit | Python |
|
94e4d30dbdbcf9765bf731b1bd792d0fcf3f9d4a | Add prettification middleware | fmorgner/django-maccman,fmorgner/django-maccman | maccman/middleware/prettify.py | maccman/middleware/prettify.py | from bs4 import BeautifulSoup
class PrettifyMiddleware(object):
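    """Old-style Django middleware that re-indents outgoing HTML responses."""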
def process_response(self, request, response):
if response.status_code == 200:
if response["content-type"].startswith("text/html"):
                beauty = BeautifulSoup(response.content, "html.parser")  # explicit parser avoids bs4 warnings
response.content = beauty.prettify()
return response
| bsd-3-clause | Python |
|