commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses 13) | lang (stringclasses 23)
---|---|---|---|---|---|---|---|---
4b696c2a54f7afd95013763c098aec30b08409d6 | Create bulb-switcher-ii.py | kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/bulb-switcher-ii.py | Python/bulb-switcher-ii.py | # Time: O(1)
# Space: O(1)
class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m == 0: return 1
if n == 1: return 2
if m == 1 and n == 2: return 3
if m == 1 or n == 2: return 4
if m == 2: return 7
return 8
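# Hedged usage sketch (added for illustration; not part of the original
# commit): exercise each branch of the case analysis above.
if __name__ == "__main__":
    s = Solution()
    assert s.flipLights(1, 0) == 1   # no presses: only the initial all-on state
    assert s.flipLights(2, 1) == 3
    assert s.flipLights(3, 1) == 4
    assert s.flipLights(3, 3) == 8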
| mit | Python |
|
0621b935558b6805d2b45fee49bc2e959201fd7a | add number-of-digit-one | zeyuanxy/leet-code,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,EdisonAlgorithms/LeetCode | vol5/number-of-digit-one/number-of-digit-one.py | vol5/number-of-digit-one/number-of-digit-one.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-03 15:21:00
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-03 15:21:14
import itertools
class Solution(object):
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 0:
return 0
def digits(n):
while n:
yield n % 10
n /= 10
def pows(b):
x = 1
while True:
yield x
x *= 10
def g(d, m):
if d < 1:
return n / (m * 10) * m
elif d == 1:
return n / (m * 10) * m + n % m + 1
else:
return (n / (m * 10) + 1) * m
return sum(itertools.starmap(g, itertools.izip(digits(n), pows(10)))) | mit | Python |
|
27d37833663842405f159127f30c6351958fcb10 | Add draft of example using the new @bench | ktbs/ktbs-bench,ktbs/ktbs-bench | bench_examples/bench_dec_insert.py | bench_examples/bench_dec_insert.py | from csv import DictWriter
from ktbs_bench.utils.decorators import bench
@bench
def batch_insert(graph, file):
"""Insert triples in batch."""
print(graph, file)
if __name__ == '__main__':
# Define some graph/store to use
graph_list = ['g1', 'g2']
# Define some files to get the triples from
n3file_list = ['f1', 'f2']
# Testing batch insert
res = {'func_name': 'batch_insert'}
for graph in graph_list:
for n3file in n3file_list:
time_res = batch_insert(graph, n3file)
res[time_res[0]] = time_res[1]
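# Note (assumption, for readability): the @bench decorator is taken to
# return a (label, seconds) pair, which is why each timing result is
# stored as res[time_res[0]] = time_res[1] above.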
# Setup the result CSV
with open('/tmp/res.csv', 'wb') as outfile:
res_csv = DictWriter(outfile, fieldnames=res.keys())
res_csv.writeheader()
# Write the results
res_csv.writerow(res)
| mit | Python |
|
7c0a37e2ad123dfeb409c682a1cab37630678642 | Improve preprocessing text docs | sarvex/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,gunan/tensorflow,annarev/tensorflow,davidzchen/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,annarev/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,sarvex/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,gunan/tensorflow,yongtang/tensorflow,aldian/tensorflow,karllessard/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,cxxgtxy/tensorflow,aam-at/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,gunan/tensorflow,aldian/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,sarvex/tensorflow,aldian/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,aldian/tensorflow,sarvex/tensorflow,freedomtan/tensorflow,gunan/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,petewarden/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,gunan/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,petewarden/tensorflow,petewarden/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,gautam1858/tensorflow,petewarden/tensorflow,annarev/tensorflow,gunan/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,karllessard/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,annarev/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gunan/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,aam-at/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,petewarden/tensorflow,david
zchen/tensorflow,paolodedios/tensorflow,aldian/tensorflow,frreiss/tensorflow-fred,cxxgtxy/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,gunan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,davidzchen/tensorflow,gunan/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,cxxgtxy/tensorflow,aldian/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,davidzchen/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,annarev/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,gunan/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,aldian/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow,annarev/tensorflow,aam-at/tensorflow,annarev/tensorflow,karllessard/tensorflow,aam-at/tensorflow,gunan/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/python/keras/preprocessing/text.py | tensorflow/python/keras/preprocessing/text.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for text input preprocessing.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_preprocessing import text
# Keep a module-level alias: the functions below take a `text` argument
# that shadows the keras_preprocessing.text module inside their bodies.
_keras_text = text
from tensorflow.python.util.tf_export import keras_export
hashing_trick = text.hashing_trick
Tokenizer = text.Tokenizer
@keras_export('keras.preprocessing.text.text_to_word_sequence')
def text_to_word_sequence(text,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True, split=" "):
"""Converts a text to a sequence of words (or tokens).
This function transforms a string of text into a list of words
while ignoring `filters` which include punctuations by default.
>>> text = 'This is a sample sentence.'
>>> tf.keras.preprocessing.text.text_to_word_sequence(text)
['this', 'is', 'a', 'sample', 'sentence']
Arguments:
text: Input text (string).
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: `'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n'`,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to convert the input to lowercase.
split: str. Separator for word splitting.
Returns:
A list of words (or tokens).
"""
return _keras_text.text_to_word_sequence(
text, filters=filters, lower=lower, split=split)
@keras_export('keras.preprocessing.text.one_hot')
def one_hot(text, n,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' '):
"""One-hot encodes a text into a list of word indexes of size `n`.
This function receives as input a string of text and returns a
list of encoded integers each corresponding to a word (or token)
in the given input string.
>>> text = 'This is a sample sentence.'
>>> tf.keras.preprocessing.text.one_hot(text, 20)
[4, 18, 1, 15, 17]
Arguments:
text: Input text (string).
n: int. Size of vocabulary.
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: ``!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n``,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to set the text to lowercase.
split: str. Separator for word splitting.
Returns:
List of integers in `[1, n]`. Each integer encodes a word
(unicity non-guaranteed).
"""
return _keras_text.one_hot(
text, n, filters=filters, lower=lower, split=split)
# text.tokenizer_from_json is only available if keras_preprocessing >= 1.1.0
try:
tokenizer_from_json = text.tokenizer_from_json
keras_export('keras.preprocessing.text.tokenizer_from_json')(
tokenizer_from_json)
except AttributeError:
pass
keras_export('keras.preprocessing.text.hashing_trick')(hashing_trick)
keras_export('keras.preprocessing.text.Tokenizer')(Tokenizer)
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for text input preprocessing.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_preprocessing import text
from tensorflow.python.util.tf_export import keras_export
text_to_word_sequence = text.text_to_word_sequence
one_hot = text.one_hot
hashing_trick = text.hashing_trick
Tokenizer = text.Tokenizer
keras_export(
'keras.preprocessing.text.text_to_word_sequence')(text_to_word_sequence)
keras_export('keras.preprocessing.text.one_hot')(one_hot)
keras_export('keras.preprocessing.text.hashing_trick')(hashing_trick)
keras_export('keras.preprocessing.text.Tokenizer')(Tokenizer)
# text.tokenizer_from_json is only available if keras_preprocessing >= 1.1.0
try:
tokenizer_from_json = text.tokenizer_from_json
keras_export('keras.preprocessing.text.tokenizer_from_json')(
tokenizer_from_json)
except AttributeError:
pass
| apache-2.0 | Python |
175470eea9716f587a2339932c1cfb6c5240c4df | add tools.testing module for asserts (numpy, pandas compat wrapper) | wzbozon/statsmodels,wzbozon/statsmodels,phobson/statsmodels,nvoron23/statsmodels,phobson/statsmodels,detrout/debian-statsmodels,cbmoore/statsmodels,kiyoto/statsmodels,wkfwkf/statsmodels,astocko/statsmodels,adammenges/statsmodels,gef756/statsmodels,yl565/statsmodels,hlin117/statsmodels,saketkc/statsmodels,DonBeo/statsmodels,Averroes/statsmodels,bzero/statsmodels,wwf5067/statsmodels,Averroes/statsmodels,wwf5067/statsmodels,YihaoLu/statsmodels,DonBeo/statsmodels,bzero/statsmodels,nguyentu1602/statsmodels,adammenges/statsmodels,adammenges/statsmodels,alekz112/statsmodels,musically-ut/statsmodels,nguyentu1602/statsmodels,rgommers/statsmodels,jseabold/statsmodels,cbmoore/statsmodels,nvoron23/statsmodels,musically-ut/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,bzero/statsmodels,bert9bert/statsmodels,bsipocz/statsmodels,gef756/statsmodels,huongttlan/statsmodels,josef-pkt/statsmodels,phobson/statsmodels,yl565/statsmodels,josef-pkt/statsmodels,kiyoto/statsmodels,detrout/debian-statsmodels,cbmoore/statsmodels,jstoxrocky/statsmodels,jseabold/statsmodels,yl565/statsmodels,alekz112/statsmodels,ChadFulton/statsmodels,edhuckle/statsmodels,alekz112/statsmodels,wkfwkf/statsmodels,edhuckle/statsmodels,bert9bert/statsmodels,wkfwkf/statsmodels,wzbozon/statsmodels,bashtage/statsmodels,nvoron23/statsmodels,astocko/statsmodels,cbmoore/statsmodels,bert9bert/statsmodels,huongttlan/statsmodels,waynenilsen/statsmodels,nvoron23/statsmodels,alekz112/statsmodels,rgommers/statsmodels,YihaoLu/statsmodels,saketkc/statsmodels,hainm/statsmodels,musically-ut/statsmodels,yl565/statsmodels,DonBeo/statsmodels,wdurhamh/statsmodels,hlin117/statsmodels,jstoxrocky/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,statsmodels/statsmodels,bert9bert/statsmodels,kiyoto/statsmodels,waynenilsen/statsmodels,huongttlan/statsmodels,ChadFulton/statsmodels,bashtage/statsmodels,gef756/statsmodels,YihaoLu/statsmodels,rgommers/statsmodels,bzero/statsmodels,wdurhamh/statsmodels,edhuckle/statsmodels,edhuckle/statsmodels,gef756/statsmodels,wdurhamh/statsmodels,wdurhamh/statsmodels,phobson/statsmodels,detrout/debian-statsmodels,bsipocz/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,saketkc/statsmodels,wwf5067/statsmodels,hainm/statsmodels,detrout/debian-statsmodels,huongttlan/statsmodels,rgommers/statsmodels,hainm/statsmodels,saketkc/statsmodels,wzbozon/statsmodels,jstoxrocky/statsmodels,josef-pkt/statsmodels,cbmoore/statsmodels,waynenilsen/statsmodels,YihaoLu/statsmodels,wzbozon/statsmodels,phobson/statsmodels,astocko/statsmodels,astocko/statsmodels,jstoxrocky/statsmodels,wkfwkf/statsmodels,gef756/statsmodels,bsipocz/statsmodels,edhuckle/statsmodels,saketkc/statsmodels,ChadFulton/statsmodels,wwf5067/statsmodels,hlin117/statsmodels,jseabold/statsmodels,nvoron23/statsmodels,hainm/statsmodels,nguyentu1602/statsmodels,josef-pkt/statsmodels,kiyoto/statsmodels,waynenilsen/statsmodels,ChadFulton/statsmodels,bert9bert/statsmodels,rgommers/statsmodels,Averroes/statsmodels,bzero/statsmodels,DonBeo/statsmodels,musically-ut/statsmodels,nguyentu1602/statsmodels,ChadFulton/statsmodels,bashtage/statsmodels,bashtage/statsmodels,wdurhamh/statsmodels,YihaoLu/statsmodels,Averroes/statsmodels,statsmodels/statsmodels,bsipocz/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,kiyoto/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,adammenges/statsmodels,hlin117/statsmodels,jseabold/statsmodels,DonBeo/statsmodels,wkfw
kf/statsmodels,yl565/statsmodels | statsmodels/tools/testing.py | statsmodels/tools/testing.py | """assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def is_pandas_min_version(min_version):
'''check whether pandas is at least min_version
'''
from pandas.version import short_version as pversion
return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
if not is_pandas_min_version('0.14.1'):
npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
else:
if isinstance(desired, pandas.Index):
pdt.assert_index_equal(actual, desired)
elif isinstance(desired, pandas.Series):
pdt.assert_series_equal(actual, desired, **kwds)
elif isinstance(desired, pandas.DataFrame):
pdt.assert_frame_equal(actual, desired, **kwds)
else:
npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
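# Hedged usage sketch (illustrative, assumes pandas >= 0.14.1):
# s = pandas.Series([1, 2, 3])
# assert_equal(s, s.copy())                  # dispatches to pdt.assert_series_equal
# assert_equal(np.arange(3), np.arange(3))   # falls back to npt.assert_equal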
| bsd-3-clause | Python |
|
68cd37c1c1bf279bc67e3d6391c8f4b88e0eb7a0 | add buggy profiler, not ready each instanciation add 2sec to exec time | short-edition/syntaxnet-wrapper | syntaxnet_wrapper/test/profile_execution.py | syntaxnet_wrapper/test/profile_execution.py | from syntaxnet_wrapper.wrapper import SyntaxNetWrapper
from time import time
from prettytable import PrettyTable
def profile_exec(niter, action, keep_wrapper):
t = time()
sentence = 'une phrase de test'
for i in range(niter):
if not keep_wrapper or i == 0:
sn_wrapper = SyntaxNetWrapper('French')
if action == 'morpho':
sn_wrapper.morpho_sentence(sentence)
elif action == 'tagger':
sn_wrapper.tag_sentence(sentence)
elif action == 'parser':
sn_wrapper.parse_sentence(sentence)
del sn_wrapper
return time() - t
x = PrettyTable(['Action', 'niter', 'keep wrapper', 'execution_time'])
# Describe test case
test_cases = [
{'action': 'morpho', 'niter': 1, 'keep_wrapper': False},
{'action': 'morpho', 'niter': 10, 'keep_wrapper': True},
#{'action': 'morpho', 'niter': 100, 'keep_wrapper': True},
#{'action': 'morpho', 'niter': 1000, 'keep_wrapper': True},
{'action': 'tagger', 'niter': 1, 'keep_wrapper': True},
#{'action': 'tagger', 'niter': 10, 'keep_wrapper': True},
#{'action': 'tagger', 'niter': 100, 'keep_wrapper': True},
#{'action': 'tagger', 'niter': 1000, 'keep_wrapper': True},
{'action': 'parser', 'niter': 1, 'keep_wrapper': True},
#{'action': 'parser', 'niter': 10, 'keep_wrapper': True},
#{'action': 'parser', 'niter': 100, 'keep_wrapper': True},
#{'action': 'parser', 'niter': 1000, 'keep_wrapper': True},
{'action': 'morpho', 'niter': 1, 'keep_wrapper': False},
#{'action': 'morpho', 'niter': 10, 'keep_wrapper': False},
#{'action': 'morpho', 'niter': 100, 'keep_wrapper': False},
#{'action': 'morpho', 'niter': 1000, 'keep_wrapper': False},
{'action': 'tagger', 'niter': 1, 'keep_wrapper': False},
#{'action': 'tagger', 'niter': 10, 'keep_wrapper': False},
#{'action': 'tagger', 'niter': 100, 'keep_wrapper': False},
#{'action': 'tagger', 'niter': 1000, 'keep_wrapper': False},
{'action': 'parser', 'niter': 1, 'keep_wrapper': False},
#{'action': 'parser', 'niter': 10, 'keep_wrapper': False},
#{'action': 'parser', 'niter': 100, 'keep_wrapper': False},
#{'action': 'parser', 'niter': 1000, 'keep_wrapper': False},
]
for test_case in test_cases:
exec_time = profile_exec(**test_case)
x.add_row([test_case['action'],
test_case['niter'],
test_case['keep_wrapper'],
exec_time])
with open('output_profiling.txt', 'wb') as file_:
file_.write(x.get_string())
| apache-2.0 | Python |
|
c76c7b19afdf364ade2b7d0793cbdb14cb315131 | add smalltalk like object model | loucq123/object_model | smalltalk_like/obj_model.py | smalltalk_like/obj_model.py | class Base(object):
def __init__(self, cls, fields):
self.cls = cls
self.fields = fields
def read_attribute(self, field_name):
return self.fields.get(field_name)
def write_attribute(self, field_name, value):
self.fields[field_name] = value
def call_method(self, method_name, *args):
method = self.cls.find_method(method_name)
return method(self, *args)
def isinstance(self, cls):
return self.cls.issubclass(cls)
class Class(Base):
def __init__(self, name, base_class, fields, metaclass):
Base.__init__(self, metaclass, fields)
self.name = name
self.base_class = base_class
def super_class_traversal(self):
if self.base_class is None:
return [self]
else:
return [self] + self.base_class.super_class_traversal()
def issubclass(self, cls):
return cls in self.super_class_traversal()
def find_method(self, method_name):
for cls in self.super_class_traversal():
if method_name in cls.fields:
return cls.fields[method_name]
return MISSING
class Instance(Base):
def __init__(self, cls):
assert isinstance(cls, Class)
Base.__init__(self, cls, {})
OBJECT = Class(name='object', base_class=None, fields={}, metaclass=None)
TYPE = Class(name='TYPE', base_class=OBJECT, fields={}, metaclass=None)
TYPE.cls = TYPE
OBJECT.cls = TYPE
MISSING = object()
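# Hedged usage sketch (illustrative only, not part of the commit): define a
# class with one method through this object model, instantiate it, and
# dispatch a method call on the instance.
def _example():
    def greet(self):
        return 'hi, ' + self.read_attribute('name')
    Person = Class(name='Person', base_class=OBJECT,
                   fields={'greet': greet}, metaclass=TYPE)
    p = Instance(Person)
    p.write_attribute('name', 'ada')
    assert p.call_method('greet') == 'hi, ada'
    assert p.isinstance(OBJECT)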
| mit | Python |
|
5bcd31440322d19262b694a5df299f43af577e5e | Create app.py | Kalimaha/fake_data_crud_service | app.py | app.py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
| mit | Python |
|
f6624531e47c599af42e75d84708359eaa982569 | Solve AoC 2020-12-25/1 | matslindh/codingchallenges,matslindh/codingchallenges | adventofcode2020/25.py | adventofcode2020/25.py |
def loop_size_finder(inp, subject_number=7):
i = 1
c = 0
while i != inp:
i *= subject_number
i %= 20201227
c += 1
return c
def transformer(iterations, subject_number=7):
i = 1
for _ in range(0, iterations):
i *= subject_number
i %= 20201227
return i
def test_loop_size_finder():
assert loop_size_finder(5764801) == 8
assert loop_size_finder(17807724) == 11
assert transformer(11, subject_number=5764801) == transformer(8, subject_number=17807724)
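# Note (illustrative): transformer(loops, subject) is modular exponentiation,
# so it is equivalent to the built-in pow(subject, loops, 20201227).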
if __name__ == '__main__':
card_loops = loop_size_finder(10212254)
door_loops = loop_size_finder(12577395)
print(transformer(card_loops, 12577395))
| mit | Python |
|
b5433672a4e27db4e8f8698c311d05055462ac00 | Create main file | rcs333/ClinVirusSeq | annotate_clin_virus.py | annotate_clin_virus.py | import timeit
import subprocess
import glob
import sys
import argparse
start = timeit.default_timer()
# This program annotates a set of clinical virus samples.
# More functionality will be added as needed.
# parser = argparse.ArgumentParser(description= 'Annotate a set of UW clinical viral samples, pulling virus information from prokka and blast')
# parser.add_argument('file_dir', help='Input file directory, all .fasta files will be processed and .seq and .gbf files will be produced in the format input_dir/output/FASTA_name')
# parser.add_argument('metadata_info_sheet_location', help='.csv file where all of the metadata is stored')
# parser.add_argument('sbt_file_loc', help='location of .sbt file for .gbf file creation')
# args = parser.parse_args()
# Here I assume that the .fasta file has multiple fastas as opposed to being given a directory, this is subject to later change
fasta_filename = '10fasta_UWViroClinSeq.fasta'
metadata_info_sheet = 'UWVIROCLINSEQ - SCCA.csv'
gff_file_loc = 'HPIV3_121416.gff'
# Takes the name of a clinical virus as specified on the metadata sheet and returns a list of the relevant metadata
def pull_metadata(virus_name):
for line in open(metadata_info_sheet):
if line.split(',')[1] == virus_name:
# Parse the row; eventually return two strings, one for the cmt file and the other for the .fsa features
return line.strip().split(',')
return None
def parse_gff(gff_file_loc):
# First two lines are garbage
# One line a sequence format: ##TYPE DNA virus_name
# then sequences start:
# FORMAT:
# RNA NAME
# SEQUENCE
# end-
# all of them, also in the same order as the first list
# NAME GENEIOUS cds ## ## stupid shit then the names
# all named, and also in order
# Write this into lists
# write the damn files right here
# pull_metadata(name)
# write the .tbl and .fsa right here
pass  # TODO: implement the .gff parsing
def write_output():
# make a folder for each, name it the sample name
# Go through and make .fsa and .tbl files out of our data
# TODO: generalize, but first run it with hard-coded filepaths
pass  # TODO: implement
def run_tbl():
# run .tbl2asn on all of the folders and process the .sqn files for submission
# Probably entails throwing the .sbt file into each folder
pass  # TODO: implement the tbl2asn step
# Process the fasta_file
# Now we go through and actually work our magic on the viruses
virus_name_list = []  # TODO: populate from the fasta headers in fasta_filename
for x in range(0, len(virus_name_list)):
clin_data_list = pull_metadata(virus_name_list[x])
# TODO: Modify fasta/cmt file
# TODO: Run Prokka - with options stolen from sheet
| mit | Python |
|
92aaff39dbd670f65dcbdeb34a2a506e0fcdf58b | add basic show_urls test | haakenlid/django-extensions,linuxmaniac/django-extensions,linuxmaniac/django-extensions,haakenlid/django-extensions,django-extensions/django-extensions,linuxmaniac/django-extensions,haakenlid/django-extensions,django-extensions/django-extensions,django-extensions/django-extensions | tests/management/commands/test_show_urls.py | tests/management/commands/test_show_urls.py | # -*- coding: utf-8 -*-
from django.core.management import call_command
from django.utils.six import StringIO
def test_show_urls_format_dense():
out = StringIO()
call_command('show_urls', stdout=out)
output = out.getvalue()
assert "/admin/\tdjango.contrib.admin.sites.index\tadmin:index\n" in output
assert "/admin/<app_label>/\tdjango.contrib.admin.sites.app_index\tadmin:app_list\n" in output
def test_show_urls_format_verbose():
out = StringIO()
call_command('show_urls', format="verbose", stdout=out)
output = out.getvalue()
assert """/login/
\tController: django.contrib.auth.views.LoginView
\tURL Name: login""" in output
| mit | Python |
|
74a4f56d28497de89415f29ca3e1d6298c2fdd23 | Create drivers.py | ariegg/webiopi-drivers,ariegg/webiopi-drivers | chips/sensor/simulation/drivers.py | chips/sensor/simulation/drivers.py | # This code has to be added to the corresponding __init__.py
DRIVERS["simulatedsensors"] = ["PRESSURE", "TEMPERATURE", "LUMINOSITY", "DISTANCE", "HUMIDITY",
"COLOR", "CURRENT", "VOLTAGE", "POWER",
"LINEARACCELERATION", "ANGULARACCELERATION", "ACCELERATION", "LINEARVELOCITY", "ANGULARVELOCITY", "VELOCITY",
"SENSORS"]
| apache-2.0 | Python |
|
0d77fe363b6e6e8b1a0424cec7631cf13b669968 | add linear simulation | harmslab/epistasis,Zsailer/epistasis | epistasis/simulate/linear.py | epistasis/simulate/linear.py | __doc__ = """Submodule with various classes for generating/simulating genotype-phenotype maps."""
# ------------------------------------------------------------
# Imports
# ------------------------------------------------------------
import numpy as np
from gpmap.gpm import GenotypePhenotypeMap
# local imports
from epistasis.decomposition import generate_dv_matrix
from epistasis.simulate.base import BaseSimulation
# ------------------------------------------------------------
# ArtificialMap object can be used to quickly generating a toy
# space for testing the EpistasisModels
# ------------------------------------------------------------
class LinearSimulation(BaseSimulation):
"""Construct an genotype-phenotype from linear building blocks and
epistatic coefficients.
Example
-------
Phenotype = b0 + b1 + b2 + b3 + b12 + b13 + b23 + b123
Parameters
---------
wildtype : str
Wildtype genotype
mutations : dict
Mapping for each site to its alphabet
order : int
Order of epistasis in simulated genotype-phenotype map
betas : array-like
values of epistatic coefficients (must be positive for this function
to work. Log is taken)
model_type : str
Use a local or global (i.e. Walsh space) epistasis model to construct
phenotypes
"""
def __init__(self, wildtype, mutations,
model_type='local',
):
# Construct epistasis mapping objects (empty)
super(LinearSimulation,self).__init__(
wildtype,
mutations,
)
self.model_type = model_type
@property
def p_additive(self):
"""Get the additive phenotypes"""
orders = self.epistasis.getorder
labels = list(orders[0].labels) + list(orders[1].labels)
vals = list(orders[0].values) + list(orders[1].values)
x = generate_dv_matrix(self.binary.genotypes, labels, model_type=self.model_type)
return np.dot(x, vals)
def build(self):
""" Build the phenotype map from epistatic interactions. """
# Allocate phenotype numpy array
_phenotypes = np.zeros(self.n, dtype=float)
# Get model type:
self.X = generate_dv_matrix(self.binary.genotypes, self.epistasis.labels, model_type=self.model_type)
self.phenotypes = np.dot( self.X, self.epistasis.values)
| unlicense | Python |
|
14e637720d6c80ed88232130b00385ceb4d451da | Create manual/__init__.py | MichaelCurrin/twitterverse,MichaelCurrin/twitterverse | app/tests/manual/__init__.py | app/tests/manual/__init__.py | """
Manual test module.
Note that while `TEST_MODE` should be set as an environment variable for the
unit and integration tests, we want that off here so we can test against
local config data.
"""
| mit | Python |
|
5bd4534b375efed2ce5026a64228a45a9acc1d64 | add parallel runner | datamicroscopes/kernels,datamicroscopes/kernels,datamicroscopes/kernels | microscopes/kernels/parallel.py | microscopes/kernels/parallel.py | """Contains a parallel runner implementation, with support
for various backends
"""
from microscopes.common import validator
import multiprocessing as mp
def _mp_work(args):
runner, niters = args
runner.run(niters)
return runner.get_latent()
class runner(object):
def __init__(self, runners, backend='multiprocessing', **kwargs):
self._runners = runners
if backend not in ('multiprocessing',):
raise ValueError("invalid backend: {}".format(backend))
self._backend = backend
if backend == 'multiprocessing':
validator.validate_kwargs(kwargs, ('processes',))
if 'processes' not in kwargs:
kwargs['processes'] = mp.cpu_count()
validator.validate_positive(kwargs['processes'], 'processes')
self._processes = kwargs['processes']
else:
assert False, 'should not be reached'
def run(self, niters=10000):
"""Run each runner for `niters`, using the backend for parallelism
"""
if self._backend == 'multiprocessing':
pool = mp.Pool(processes=self._processes)
args = [(runner, niters) for runner in self._runners]
# map_async() + get() allows us to workaround a bug where
# control-C doesn't kill multiprocessing workers
self._latents = pool.map_async(_mp_work, args).get(10000000)
pool.close()
pool.join()
else:
assert False, 'should not be reached'
def get_latents(self):
return self._latents
| bsd-3-clause | Python |
|
0cdc87edc4d5e4c967e7bc5bd35c5b30151d5a6e | Create admin_pages.py | marbindrakon/eve-wspace,evewspace/eve-wspace,mmalyska/eve-wspace,evewspace/eve-wspace,marbindrakon/eve-wspace,evewspace/eve-wspace,evewspace/eve-wspace,marbindrakon/eve-wspace,marbindrakon/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace | evewspace/API/admin_pages.py | evewspace/API/admin_pages.py | from core.admin_page_registry import registry
registry.register('SSO', 'sso_admin.html', 'API.change_ssoaccesslist')
| apache-2.0 | Python |
|
48190b463bcbafc0b1d3af6c41677a295237e3ba | Add missing file | Simran-B/arangodb,Simran-B/arangodb,fceller/arangodb,fceller/arangodb,fceller/arangodb,wiltonlazary/arangodb,graetzer/arangodb,graetzer/arangodb,graetzer/arangodb,Simran-B/arangodb,wiltonlazary/arangodb,graetzer/arangodb,baslr/ArangoDB,arangodb/arangodb,joerg84/arangodb,graetzer/arangodb,graetzer/arangodb,wiltonlazary/arangodb,m0ppers/arangodb,joerg84/arangodb,fceller/arangodb,baslr/ArangoDB,graetzer/arangodb,fceller/arangodb,hkernbach/arangodb,graetzer/arangodb,m0ppers/arangodb,joerg84/arangodb,hkernbach/arangodb,hkernbach/arangodb,baslr/ArangoDB,joerg84/arangodb,joerg84/arangodb,m0ppers/arangodb,baslr/ArangoDB,m0ppers/arangodb,joerg84/arangodb,baslr/ArangoDB,hkernbach/arangodb,Simran-B/arangodb,Simran-B/arangodb,baslr/ArangoDB,fceller/arangodb,fceller/arangodb,baslr/ArangoDB,hkernbach/arangodb,arangodb/arangodb,baslr/ArangoDB,graetzer/arangodb,joerg84/arangodb,hkernbach/arangodb,fceller/arangodb,m0ppers/arangodb,joerg84/arangodb,graetzer/arangodb,fceller/arangodb,arangodb/arangodb,m0ppers/arangodb,m0ppers/arangodb,Simran-B/arangodb,hkernbach/arangodb,baslr/ArangoDB,hkernbach/arangodb,m0ppers/arangodb,wiltonlazary/arangodb,arangodb/arangodb,arangodb/arangodb,joerg84/arangodb,arangodb/arangodb,hkernbach/arangodb,hkernbach/arangodb,m0ppers/arangodb,m0ppers/arangodb,fceller/arangodb,graetzer/arangodb,wiltonlazary/arangodb,joerg84/arangodb,wiltonlazary/arangodb,Simran-B/arangodb,arangodb/arangodb,hkernbach/arangodb,joerg84/arangodb,m0ppers/arangodb,graetzer/arangodb,Simran-B/arangodb,baslr/ArangoDB,graetzer/arangodb,graetzer/arangodb,wiltonlazary/arangodb,hkernbach/arangodb,hkernbach/arangodb,arangodb/arangodb,baslr/ArangoDB,wiltonlazary/arangodb,baslr/ArangoDB,baslr/ArangoDB,joerg84/arangodb,joerg84/arangodb,baslr/ArangoDB,Simran-B/arangodb,m0ppers/arangodb,hkernbach/arangodb,joerg84/arangodb,m0ppers/arangodb,Simran-B/arangodb | 3rdParty/V8/V8-5.0.71.39/build/has_valgrind.py | 3rdParty/V8/V8-5.0.71.39/build/has_valgrind.py | #!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind')
LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86')
LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64')
def DoMain(_):
"""Hook to be called from gyp without starting a separate python
interpreter."""
return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR))
if __name__ == '__main__':
print DoMain([])
| apache-2.0 | Python |
|
82860a07e361aa5322b7d055c60c7178e40296bd | Create search_accepted_nodes_for_queries.py | DynamoDS/Coulomb,DynamoDS/Coulomb,DynamoDS/Coulomb | SearchTools/search_accepted_nodes_for_queries.py | SearchTools/search_accepted_nodes_for_queries.py | # Search and accept, looks for each accept what the previously entered search text
# and the node that was accepted
import gzip
import json
import base64
import sys # Library of system calls
import traceback
import time
import os
from os.path import isfile, join
# Check that the script has been given the right argumets
if len(sys.argv) != 3:
print "Usage: python search_actions_extract.py path_to_data results_path"
print "Export the search and accept actions in the logs"
exit(1)
# Load the arguments into local variables
VERBOSE = True
path = sys.argv[1] # First command line argument (input path)
out_path = sys.argv[2] # Second command line argument (results path)
# Setup the tracking data structures
results = [] # Holds the results
linesCount = 0 # Number of lines processed
searchCount = 0 # Number of search messages processed
err = 0; # Error count
lastSeenSearch = None;
# Print the header row
print time.strftime("%Y-%m-%d %H:%M:%S"), "LinesCount", "SearchesCount", "Errors Count"
# Recursively list the files in sub folders
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
for filePath in files:
# If the file isn't a sorted file, skip it
if not filePath.endswith('sorted'):
continue
# Open the file, decompressing it as we go
f = gzip.open (filePath)
# Walk over every line in the file
for ln in f:
linesCount += 1 # Count them
# If we've seen 10,000 lines emit a progress indicator message
if linesCount % 10000 == 0:
print time.strftime("%Y-%m-%d %H:%M:%S"), linesCount, searchCount,err
try:
if not ln.startswith("{"):
continue # It wasn't a valid data line, maybe a header or an error
data = json.loads(ln) # The data lines are JSON packed, so load them into a map
# At this point `data` contains a map of all the data fields in the message
tag = data["Tag"] # Extract the tag
if tag != "Search" and tag != "Search-NodeAdded": # If it isn't a search message, skip
continue
searchCount += 1
result = {} # Assemble an empty result structure
# Copy over the relevant data
result["Session"] = data["SessionID"] # Populate the sessions
result["MicroTime"] = data["MicroTime"] # Add the timing
result["Query"] = base64.b64decode(data["Data"]) # The thing that is being searched for
# Now manually compute a data item called 'Action', what the user was doing
if tag == "Search":
result["Action"] = "SEARCH"
lastSeenSearch = result
if tag == "Search-NodeAdded":
result["Action"] = "ACCEPT"
if lastSeenSearch is not None and lastSeenSearch['Session'] == result['Session']:
searchAnswer = {}
searchAnswer['Session'] = lastSeenSearch['Session']
searchAnswer['Query'] = lastSeenSearch['Query']
searchAnswer['Accepted'] = result['Query']
searchAnswer['TimeSinceLastSearch'] = int(result['MicroTime']) - int(lastSeenSearch['MicroTime'])
results.append(searchAnswer)
if VERBOSE:
print searchAnswer
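# Example of one emitted record (values are illustrative only):
# {'Session': 'abc123', 'Query': 'poin',
#  'Accepted': 'Point.ByCoordinates', 'TimeSinceLastSearch': 154000}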
except:
# If there is a problem, print what went wrong
print filePath
print "FAILED LINE: "+ ln
print traceback.format_exc()
err += 1
# Output the results into the output file
print time.strftime("%Y-%m-%d %H:%M:%S"), "Writing results"
out_file = open(out_path, "w")
out_file.write(json.dumps(results))
out_file.close()
print time.strftime("%Y-%m-%d %H:%M:%S"), "Done"
| mit | Python |
|
ccce1108e1deab466fd72c022949fa05fa807a3a | add initial files for launch | googleapis/nodejs-policy-troubleshooter,googleapis/nodejs-policy-troubleshooter,googleapis/nodejs-policy-troubleshooter | synth.py | synth.py | # http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import subprocess
import logging
logging.basicConfig(level=logging.DEBUG)
# run the gapic generator
gapic = gcp.GAPICBazel()
versions = ["v1"]
name = 'policytroubleshooter'
for version in versions:
library = gapic.node_library(
name,
version,
proto_path = f'google/cloud/policytroubleshooter/{version}')
s.copy(library, excludes=[])
# Copy common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(
source_location='build/src', versions=["v1"], default_version="v1")
s.copy(templates, excludes=[])
node.postprocess_gapic_library()
| apache-2.0 | Python |
|
f480a0a8d51c5c059a05165f30f64bb310299ee3 | Add 'rescore' command | dbinetti/barberscore-django,dbinetti/barberscore,dbinetti/barberscore-django,dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api | project/apps/api/management/commands/rescore.py | project/apps/api/management/commands/rescore.py | from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Contestant,
Appearance,
Performance,
)
class Command(BaseCommand):
help = "Command to denormailze data."
def handle(self, *args, **options):
ps = Performance.objects.all()
for p in ps:
p.save()
as_ = Appearance.objects.all()
for a in as_:
a.save()
cs = Contestant.objects.all()
for c in cs:
c.save()
return "Done"
| bsd-2-clause | Python |
|
d4a7bbe27b285e455a3beafefd22fc493edeb161 | Add unittest for eventlogger config validation. | ketoo/Astron,pizcogirl/Astron,ketoo/Astron,blindsighttf2/Astron,blindsighttf2/Astron,ketoo/Astron,pizcogirl/Astron,pizcogirl/Astron,ketoo/Astron,blindsighttf2/Astron,pizcogirl/Astron,blindsighttf2/Astron | test/test_config_eventlogger.py | test/test_config_eventlogger.py | #!/usr/bin/env python2
import unittest
import subprocess
import threading
import tempfile
import os
from testdc import *
DAEMON_PATH = './astrond'
TERMINATED = -15
EXITED = 1
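# Interpretation note: Popen reports -15 (SIGTERM) when the daemon was still
# running at the timeout and had to be killed, i.e. the config was accepted.
# An exit code of 1 would mean astrond rejected the config and quit on its own.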
class ConfigTest(object):
def __init__(self, config):
self.config = config
self.process = None
def run(self, timeout):
def target():
self.process = subprocess.Popen([DAEMON_PATH, self.config])
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.process.returncode
class TestConfigEventLogger(unittest.TestCase):
@classmethod
def setUpClass(cls):
cfg, cls.config_file = tempfile.mkstemp()
os.close(cfg)
cls.test_command = ConfigTest(cls.config_file)
@classmethod
def tearDownClass(cls):
if cls.config_file is not None:
os.remove(cls.config_file)
@classmethod
def write_config(cls, config):
f = open(cls.config_file, "w")
f.write(config)
f.close()
@classmethod
def run_test(cls, config, timeout = 2):
cls.write_config(config)
return cls.test_command.run(timeout)
def test_eventlogger_good(self):
config = """\
messagedirector:
bind: 127.0.0.1:57123
roles:
- type: eventlogger
bind: 0.0.0.0:9090
output: /var/log/astron/eventlogger/el-%Y-%m-%d-%H-%M-%S.log
rotate_interval: 1d
"""
self.assertEquals(self.run_test(config), TERMINATED)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
|
1578c4328542dd1b1c7ccd1f08dd2b2455055190 | Add integration test covering all cql types | kracekumar/python-driver,tempbottle/python-driver,bbirand/python-driver,coldeasy/python-driver,datastax/python-driver,thobbs/python-driver,yi719/python-driver,mambocab/python-driver,kishkaru/python-driver,mike-tr-adamson/python-driver,markflorisson/python-driver,kracekumar/python-driver,sontek/python-driver,aholmberg/python-driver,stef1927/python-driver,jfelectron/python-driver,sontek/python-driver,mobify/python-driver,jregovic/python-driver,bbirand/python-driver,jfelectron/python-driver,vipjml/python-driver,thelastpickle/python-driver,stef1927/python-driver,kishkaru/python-driver,jregovic/python-driver,vipjml/python-driver,thelastpickle/python-driver,yi719/python-driver,HackerEarth/cassandra-python-driver,beobal/python-driver,coldeasy/python-driver,markflorisson/python-driver,HackerEarth/cassandra-python-driver,beobal/python-driver,datastax/python-driver,aholmberg/python-driver,mike-tr-adamson/python-driver,thobbs/python-driver,tempbottle/python-driver,mobify/python-driver,mambocab/python-driver | tests/integration/test_types.py | tests/integration/test_types.py | from decimal import Decimal
from datetime import datetime
from uuid import uuid1, uuid4
import unittest
from cassandra.cluster import Cluster
from cassandra.query import ColumnCollection
class TypeTests(unittest.TestCase):
def test_basic_types(self):
c = Cluster()
s = c.connect()
s.execute("""
CREATE KEYSPACE typetests
WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}
""")
s.set_keyspace("typetests")
s.execute("""
CREATE TABLE mytable (
a text,
b text,
c ascii,
d bigint,
e blob,
f boolean,
g decimal,
h double,
i float,
j inet,
k int,
l list<text>,
m set<int>,
n map<text, int>,
o text,
p timestamp,
q uuid,
r timeuuid,
s varint,
PRIMARY KEY (a, b)
)
""")
v1_uuid = uuid1()
v4_uuid = uuid4()
mydatetime = datetime(2013, 1, 1, 1, 1, 1)
params = (
"sometext",
"sometext",
"ascii", # ascii
12345678923456789, # bigint
"blob".encode('hex'), # blob
True, # boolean
Decimal('1.234567890123456789'), # decimal
0.000244140625, # double
1.25, # float
"1.2.3.4", # inet
12345, # int
ColumnCollection(['a', 'b', 'c']), # list<text> collection
ColumnCollection({1, 2, 3}), # set<int> collection
ColumnCollection({'a': 1, 'b': 2}), # map<text, int> collection
"text", # text
mydatetime, # timestamp
v4_uuid, # uuid
v1_uuid, # timeuuid
123456789123456789123456789 # varint
)
s.execute("""
INSERT INTO mytable (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""", params)
results = s.execute("SELECT * FROM mytable")
expected = (
"sometext",
"sometext",
"ascii", # ascii
12345678923456789, # bigint
"blob", # blob
True, # boolean
Decimal('1.234567890123456789'), # decimal
0.000244140625, # double
1.25, # float
"1.2.3.4", # inet
12345, # int
('a', 'b', 'c'), # list<text> collection
{1, 2, 3}, # set<int> collection
{'a': 1, 'b': 2}, # map<text, int> collection
"text", # text
mydatetime, # timestamp
v4_uuid, # uuid
v1_uuid, # timeuuid
123456789123456789123456789 # varint
)
for expected, actual in zip(expected, results[0]):
self.assertEquals(expected, actual)
| apache-2.0 | Python |
|
78c9f392a02c0fdb72294e08a3d5ce78262443f5 | Create 1.py | jreyes97/hello-world | 1.py | 1.py | u=1
| apache-2.0 | Python |
|
36d7de73c2908aff574acb06a41660240ca554d4 | Select support | jhamrick/dbtools,jhamrick/dbtools | db.py | db.py | import sqlite3 as sql
import numpy as np
import pandas as pd
class Table(object):
def __init__(self, db, name):
self.db = db
self.name = name
conn = sql.connect(self.db)
with conn:
cur = conn.cursor()
cur.execute("PRAGMA table_info('%s')" % self.name)
rows = cur.fetchall()
# id, column name, data type, null, default, primary key
self.meta = np.array(rows)
self.columns = tuple(self.meta[:, 1])
# parse data types
dtype = []
for dt in self.meta[:, 2]:
if dt == "INTEGER":
dtype.append(int)
elif dt == "TEXT":
dtype.append(str)
else:
raise ValueError("unhandled dtype: %s" % dt)
self.dtype = tuple(dtype)
# parse primary key, if any
pk = np.nonzero(self.meta[:, 5])[0]
if len(pk) > 1:
raise ValueError("more than one primary key: %s" % pk)
elif len(pk) == 1:
self.pk = self.columns[pk[0]]
else:
self.pk = None
def select(self, columns=None, where=None):
# argument parsing
if columns is None:
cols = list(self.columns)
else:
if not hasattr(columns, '__iter__'):
cols = [columns]
else:
cols = list(columns)
# select primary key even if not given, so we can use the
# correct index later
if self.pk not in cols:
cols.insert(0, self.pk)
sel = ",".join(cols)
# base query
query = "SELECT %s FROM %s" % (sel, self.name)
# add a selection filter, if specified
if where is not None:
where_str, where_args = where
query += " WHERE %s" % where_str
if not hasattr(where_args, "__iter__"):
where_args = (where_args,)
args = (query, where_args)
else:
args = (query,)
# connect to the database and execute the query
conn = sql.connect(self.db)
with conn:
cur = conn.cursor()
cur.execute(*args)
rows = cur.fetchall()
# now we need to parse the result into a DataFrame
if self.pk in cols:
index = self.pk
else:
index = None
data = pd.DataFrame.from_records(rows, columns=cols, index=index)
return data
def __getitem__(self, key):
if isinstance(key, int):
# select a row
if self.pk is None:
raise ValueError("no primary key column")
data = self.select(where=("%s=?" % self.pk, key))
elif isinstance(key, slice):
# select multiple rows
if self.pk is None:
raise ValueError("no primary key column")
if key.step not in (None, 1):
raise ValueError("cannot handle step size > 1")
if key.start is None and key.stop is None:
where = None
elif key.start is None:
where = ("%s<?" % self.pk, key.stop)
elif key.stop is None:
where = ("%s>=?" % self.pk, key.start)
else:
where = ("%s<? AND %s>=?" % (self.pk, self.pk),
(key.stop, key.start))
data = self.select(where=where)
elif isinstance(key, str):
# select a column
data = self.select(key)
elif all(isinstance(k, str) for k in key):
# select multiple columns
data = self.select(key)
else:
raise ValueError("invalid key: %s" % key)
return data
tbl = Table("data.db", "Participants")
| mit | Python |
|
d596bfbbfa725111fb4c0f6d4abf6789669f06de | Create sets.py | davidone/misc,davidone/misc | sets.py | sets.py | #!/usr/bin/env python2
'''
Automatically generates one array, a.
Prints a sorted list containing only the unique elements.
'''
import random
SIZE_LIST_A = 10
a = []
def populate_arrays():
for i in range(0, SIZE_LIST_A):
a.append(random.randint(1, 100))
if __name__ == "__main__":
populate_arrays()
print "a: {:s}".format(str(a))
b = list(set(a))
b.sort()
print "b: {:s}".format(str(b))
exit(0)
| mit | Python |
|
563b9e1f826433179a5e3c5e611d40efc8736c4a | Create Hexbin Example | altair-viz/altair,jakevdp/altair | altair/examples/hexbins.py | altair/examples/hexbins.py | """
Hexbin Chart
-----------------
This example shows a hexbin chart.
"""
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
# Size of the hexbins
size = 15
# Count of distinct x features
xFeaturesCount = 12
# Count of distinct y features
yFeaturesCount = 7
# Name of the x field
xField = 'date'
# Name of the y field
yField = 'date'
# the shape of a hexagon
hexagon = "M0,-2.3094010768L2,-1.1547005384 2,1.1547005384 0,2.3094010768 -2,1.1547005384 -2,-1.1547005384Z"
alt.Chart(source).mark_point(size=size**2, shape=hexagon).encode(
x=alt.X('xFeaturePos:Q', axis=alt.Axis(title='Month',
grid=False, tickOpacity=0, domainOpacity=0)),
y=alt.Y('day(' + yField + '):O', axis=alt.Axis(title='Weekday',
labelPadding=20, tickOpacity=0, domainOpacity=0)),
stroke=alt.value('black'),
strokeWidth=alt.value(0.2),
fill=alt.Color('mean(temp_max):Q', scale=alt.Scale(scheme='darkblue')),
tooltip=['month(' + xField + '):O', 'day(' + yField + '):O', 'mean(temp_max):Q']
).transform_calculate(
# This field is required for the hexagonal X-Offset
xFeaturePos='(day(datum.' + yField + ') % 2) / 2 + month(datum.' + xField + ')'
).properties(
# Exact scaling factors to make the hexbins fit
width=size * xFeaturesCount * 2,
height=size * yFeaturesCount * 1.7320508076, # 1.7320508076 is approx. sin(60°)*2
).configure_view(
strokeWidth=0
)
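# Note (illustrative): the transform_calculate above shifts every other weekday
# row horizontally by half a bin ((day % 2) / 2), which is what produces the
# staggered hexagonal packing of the marks.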
| bsd-3-clause | Python |
|
8118dc283eececdd074bac675c57975ceeba3739 | Create gateway.py | jbetsinger/HomeAutomation,jbetsinger/HomeAutomation | Gateway/gateway.py | Gateway/gateway.py | \\ This will be the Gateway.py file for the RPi Gateway
| apache-2.0 | Python |
|
d9dcf34a73b4168885a02c495fb9b808a55b5c9e | Add spu debugger printer module | matthiaskramm/corepy,matthiaskramm/corepy,matthiaskramm/corepy,matthiaskramm/corepy | corepy/lib/printer/spu_debugger.py | corepy/lib/printer/spu_debugger.py | # Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import corepy.spre.spe as spe
import corepy.spre.syn_util as syn_util
class SPU_Debugger(object):
"""
InstructionStream printer for the Interactive SPU debugger.
Output syntax from this printer is designed to be easily used by the SPU
debugger:
ilhu(3, 0xDEAD)
iohl(3, 0xBEEF)
stqd(3, 0, 1)
"""
def __init__(self):
return
def __del__(self):
return
def header(self, fd):
return
def footer(self, fd):
return
def prologue(self, fd):
""" Allow the module to print a prologue header if desired.
The return value should be a boolean indicating whether prologue
instructions should be printed. """
return False
def epilogue(self, fd):
""" Allow the module to print a prologue header if desired.
The return value should be a boolean indicating whether epilogue
instructions should be printed. """
return False
def stream(self, fd, stream):
return
def string(self, fd, str):
"""Print a string (assumedly representing an instruction)."""
print >>fd, "\t%s" % (str)
return
def instruction(self, fd, inst):
op_str = ', '.join([self.str_op(op) for op in inst._supplied_operands])
for k, v in inst._supplied_koperands.items():
op_str += ", %s = %s" % (str(k), str(v))
print >>fd, "%s(%s)" % (inst.__class__.__name__, op_str)
return
def label(self, fd, lbl):
print >>fd, "\n%s:" % lbl.name
return
def str_op(self, op):
if isinstance(op, spe.Register):
return str(op.reg)
elif isinstance(op, spe.Variable):
return str(op.reg.reg)
return str(op)
| bsd-3-clause | Python |
|
2c0ce3c64720122bf2fdd80aeb2ff8359873ac83 | Test that noindex flag will only show robots metatag when set | Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data | municipal_finance/tests/test_analytics.py | municipal_finance/tests/test_analytics.py | from django.test import TestCase
from django.conf import settings
class TestAnalytics(TestCase):
def test_noindex_flag(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTrue('<meta name="robots" content="noindex">' not in str(response.content))
settings.NO_INDEX = "True"
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTrue('<meta name="robots" content="noindex">' in str(response.content)) | mit | Python |
|
11dd2daf7dd125e0be6a604dd22ae25efed16226 | Update at 2017-07-20 14-05-11 | amoshyc/tthl-code | test.py | test.py | import json
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from data import *
from utils import get_callbacks
def main():
with tf.device('/gpu:3'):
model = Sequential()
model.add(TimeDistributed(BatchNormalization(), input_shape=(TIMESTEPS, 224, 224, 3)))
model.add(TimeDistributed(Conv2D(4, kernel_size=5, strides=3, activation='relu')))
model.add(TimeDistributed(Conv2D(8, kernel_size=5, strides=2, activation='relu')))
model.add(TimeDistributed(Conv2D(12, kernel_size=3, strides=1, activation='relu')))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(MaxPooling2D(pool_size=3)))
model.add(Conv3D(4, kernel_size=5, strides=1, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(16))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model_arg = {
'loss': 'binary_crossentropy',
'optimizer': 'sgd',
'metrics': ['binary_accuracy']
}
model.compile(**model_arg)
model.summary()
n_train, n_val = 5000, 1000
x_train = np.zeros((n_train, TIMESTEPS, 224, 224, 3), dtype=np.float32)
y_train = np.zeros((n_train, 1), dtype=np.uint8)
x_val = np.zeros((n_val, TIMESTEPS, 224, 224, 3), dtype=np.float32)
y_val = np.zeros((n_val, 1), dtype=np.uint8)
print('Loading data...', end='')
for i in range(n_train):
x, y = next(window_train_gen)
x_train[i] = x
y_train[i] = y
for i in range(n_val):
x, y = next(window_val_gen)
x_val[i] = x
y_val[i] = y
print('ok')
fit_arg = {
'x': x_train,
'y': y_train,
'batch_size': WINDOW_BATCH_SIZE,
'epochs': 30,
'validation_data': (x_val, y_val),
'shuffle': True
}
model.fit(**fit_arg)
if __name__ == '__main__':
main() | apache-2.0 | Python |
|
0c76fa59e77786c577f0750c65f97d24eb3c4157 | Test script | hyperlex/vdcnn | test.py | test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import tables
from sklearn.metrics import f1_score,confusion_matrix
# ===================== Data preparation =============================
# Load data
print("Loading data...")
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
sequence_max_length = 1024 # Twitter has only 140 characters. We pad 4 more blank characters to the right of tweets to conform with the architecture of A. Conneau et al (2016)
from tensorflow.core.protobuf import saver_pb2
checkpoint_file = tf.train.latest_checkpoint("./")
graph = tf.Graph()
# Input data.
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=True,   # FLAGS was never defined in this script,
        log_device_placement=False)  # so use fixed defaults here
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
is_training = graph.get_operation_by_name(
"phase").outputs[0]
### To update the computation of moving_mean & moving_var, we must put it on the parent graph of minimizing loss
accuracy = graph.get_operation_by_name(
"accuracy/accuracy").outputs[0]
predictions = graph.get_operation_by_name(
"fc-3/predictions").outputs[0]
hdf5_path = "my_extendable_compressed_data_test.hdf5"
batch_size = 1000
extendable_hdf5_file = tables.open_file(hdf5_path, mode='r')
y_true_ = []
predictions_= []
for ptr in range(0, 70000, batch_size):
            # use the placeholders fetched from the graph above (there is no
            # `cnn` object in this standalone script)
            feed_dict = {input_x: extendable_hdf5_file.root.data[ptr:ptr + batch_size], input_y: extendable_hdf5_file.root.clusters[ptr:ptr + batch_size], is_training: False}
            y_true = tf.argmax(extendable_hdf5_file.root.clusters[ptr:ptr + batch_size], 1)
            y_true_bis, predictions_bis, accuracy_val = sess.run([y_true, predictions, accuracy], feed_dict=feed_dict)
y_true_.extend(y_true_bis)
predictions_.extend(predictions_bis)
confusion_matrix_ = confusion_matrix(y_true_,predictions_)
print(confusion_matrix_)
print ("f1_score", f1_score(y_true_, predictions_ ,average ='weighted'))
print ("f1_score", f1_score(y_true_, predictions_ ,average =None))
extendable_hdf5_file.close()
| mit | Python |
|
77effff7ece070eabb3853ba918d40b7eb1c3de5 | Create sc.py | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | sc.py | sc.py | #!/usr/bin/env python
import soundcloud
from clize import clize, run
from subprocess import call
@clize
def sc_load(tracks='', likes='', tags='', group=''):
opts = {}
if likes:
method = 'favorites'
elif tracks or group:
method = 'tracks'
elif tags:
method = 'tracks'
opts = {'tags': tags}
else:
return
client = soundcloud.Client(client_id='c4c979fd6f241b5b30431d722af212e8')
if likes or tracks:
user = likes or tracks
track = client.get('/resolve', url='https://soundcloud.com/' + user)
user_id = track.id
url = '/users/%d/' % user_id
elif group:
track = client.get('/resolve', url='https://soundcloud.com/groups/' + group)
group_id = track.id
url = '/groups/%d/' % group_id
else:
url = '/'
end = '%s%s' % (url, method)
for i, sound in enumerate(client.get(end, **opts)):
print("%d Loading %s..." % (i, sound.obj['title']))
call(['mpc', '-h', '<motdepasse>@entrecote', 'load',
'soundcloud://url/%s' % sound.obj['permalink_url'].replace('http:', 'https:')])
if __name__ == '__main__':
run(sc_load)
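# Illustrative invocations (hypothetical; clize turns the keyword arguments
# of sc_load into command-line options):
#   ./sc.py --likes=<username>   # queue a user's favorites
#   ./sc.py --tags=ambient       # queue tracks matching a tag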
| mit | Python |
|
2055fc1eda896103931eaba5fb01238506aaac1a | Add signup in urls | gentoo/identity.gentoo.org,dastergon/identity.gentoo.org,dastergon/identity.gentoo.org,gentoo/identity.gentoo.org | urls.py | urls.py | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from okupy.login.views import *
from okupy.user.views import *
from okupy.signup.views import *
admin.autodiscover()
urlpatterns = patterns('',
url(r'^login/$', mylogin),
url(r'^$', user),
url(r'^signup/', signup),
url(r'^admin/', include(admin.site.urls)),
)
| from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from okupy.login.views import *
from okupy.user.views import *
admin.autodiscover()
urlpatterns = patterns('',
url(r'^login/$', mylogin),
url(r'^$', user),
url(r'^admin/', include(admin.site.urls)),
)
| agpl-3.0 | Python |
d5b6299b802810748584b06242f614550155a283 | Create app.py | bmawji3/testing-my-man-bot | app.py | app.py | from flask import Flask, request
import requests
import json
import traceback
import random
import os
from urllib.parse import urlencode
from urllib.request import Request, urlopen
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def main():
# if request.method == 'POST':
# try:
# data = json.loads(request.data)
# print ('data: ', data)
# print ('request.data: ', request.data)
# except:
# print ('error?')
# elif request.method == 'GET':
# print('get')
# print (request.data)
# return 'get'
# return 'all fails\n'
if request.method == 'POST':
data = request.get_json()
if data['name'] != 'My Man':
# msg = '{}, you sent "{}".'.format(data['name'], data['text'])
msg = 'https://media.giphy.com/media/qPVzemjFi150Q/giphy.gif'
send_message(msg)
elif request.method == 'GET':
msg = 'https://media.giphy.com/media/3o7aCUqs54taGzqDWU/giphy.gif'
send_message(msg)
return ("My Man!!")
def send_message(msg):
url = 'https://api.groupme.com/v3/bots/post'
data = {
'bot_id' : os.getenv('BOT_ID'),
'text' : msg,
}
request = Request(url, urlencode(data).encode())
json = urlopen(request).read().decode()
if __name__ == '__main__':
app.run()
| mit | Python |
|
4ff22a24a7d681a3c62f7d7e4fe56c0032a83370 | Improve logging | zhangwei0181/ldap-passwd-webui,jirutka/change-password | app.py | app.py | import bottle
from bottle import get, post, static_file, request, route, template
from bottle import SimpleTemplate
from configparser import ConfigParser
from ldap3 import Connection, LDAPBindError, LDAPInvalidCredentialsResult, Server
from ldap3 import AUTH_SIMPLE, SUBTREE
from os import path
@get('/')
def get_index():
return index_tpl()
@post('/')
def post_index():
form = request.forms.get
def error(msg):
return index_tpl(username=form('username'), alerts=[('error', msg)])
if form('new-password') != form('confirm-password'):
return error("Password doesn't match the confirmation!")
if len(form('new-password')) < 8:
return error("Password must be at least 8 characters long!")
if not change_password(form('username'), form('old-password'), form('new-password')):
print("Unsuccessful attemp to change password for: %s" % form('username'))
return error("Username or password is incorrect!")
print("Password successfully changed for: %s" % form('username'))
return index_tpl(alerts=[('success', "Password has been changed")])
@route('/static/<filename>', name='static')
def serve_static(filename):
return static_file(filename, root=path.join(BASE_DIR, 'static'))
def index_tpl(**kwargs):
return template('index', **kwargs)
def change_password(username, old_pass, new_pass):
server = Server(CONF['ldap']['host'], int(CONF['ldap']['port']))
user_dn = find_user_dn(server, username)
try:
with Connection(server, authentication=AUTH_SIMPLE, raise_exceptions=True,
user=user_dn, password=old_pass) as c:
c.bind()
c.extend.standard.modify_password(user_dn, old_pass, new_pass)
return True
except (LDAPBindError, LDAPInvalidCredentialsResult):
return False
def find_user_dn(server, uid):
with Connection(server) as c:
c.search(CONF['ldap']['base'], "(uid=%s)" % uid, SUBTREE, attributes=['dn'])
return c.response[0]['dn'] if c.response else None
BASE_DIR = path.dirname(__file__)
CONF = ConfigParser()
CONF.read(path.join(BASE_DIR, 'settings.ini'))
bottle.TEMPLATE_PATH = [ BASE_DIR ]
# Set default attributes to pass into templates.
SimpleTemplate.defaults = dict(CONF['html'])
SimpleTemplate.defaults['url'] = bottle.url
# Run bottle internal test server when invoked directly (in development).
if __name__ == '__main__':
bottle.run(host='0.0.0.0', port=8080)
# Run bottle in application mode (in production under uWSGI server).
else:
application = bottle.default_app()
| import bottle
from bottle import get, post, static_file, request, route, template
from bottle import SimpleTemplate
from configparser import ConfigParser
from ldap3 import Connection, LDAPBindError, LDAPInvalidCredentialsResult, Server
from ldap3 import AUTH_SIMPLE, SUBTREE
from os import path
@get('/')
def get_index():
return index_tpl()
@post('/')
def post_index():
form = request.forms.get
def error(msg):
return index_tpl(username=form('username'), alerts=[('error', msg)])
if form('new-password') != form('confirm-password'):
return error("Password doesn't match the confirmation!")
if len(form('new-password')) < 8:
return error("Password must be at least 8 characters long!")
if not change_password(form('username'), form('old-password'), form('new-password')):
return error("Username or password is incorrect!")
return index_tpl(alerts=[('success', "Password has been changed")])
@route('/static/<filename>', name='static')
def serve_static(filename):
return static_file(filename, root=path.join(BASE_DIR, 'static'))
def index_tpl(**kwargs):
return template('index', **kwargs)
def change_password(username, old_pass, new_pass):
print("Changing password for user: %s" % username)
server = Server(CONF['ldap']['host'], int(CONF['ldap']['port']))
user_dn = find_user_dn(server, username)
try:
with Connection(server, authentication=AUTH_SIMPLE, raise_exceptions=True,
user=user_dn, password=old_pass) as c:
c.bind()
c.extend.standard.modify_password(user_dn, old_pass, new_pass)
return True
except (LDAPBindError, LDAPInvalidCredentialsResult):
return False
def find_user_dn(server, uid):
with Connection(server) as c:
c.search(CONF['ldap']['base'], "(uid=%s)" % uid, SUBTREE, attributes=['dn'])
return c.response[0]['dn'] if c.response else None
BASE_DIR = path.dirname(__file__)
CONF = ConfigParser()
CONF.read(path.join(BASE_DIR, 'settings.ini'))
bottle.TEMPLATE_PATH = [ BASE_DIR ]
# Set default attributes to pass into templates.
SimpleTemplate.defaults = dict(CONF['html'])
SimpleTemplate.defaults['url'] = bottle.url
# Run bottle internal test server when invoked directly (in development).
if __name__ == '__main__':
bottle.run(host='0.0.0.0', port=8080)
# Run bottle in application mode (in production under uWSGI server).
else:
application = bottle.default_app()
| mit | Python |
b720ecf75634718a122c97bcff29129e321aa9b2 | Add cat.py. | lemon24/python-practice | cat.py | cat.py | """
Usage: cat.py [FILE]...
Concatenate FILE(s), or standard input, to standard output.
"""
import sys
def iter_files(paths):
for path in paths:
try:
yield open(path, 'rb')
except (IOError, OSError) as e:
print("error: {}".format(e), file=sys.stderr)
def main(argv=None):
if not argv:
argv = list(sys.argv)
if len(argv) < 2:
files = [sys.stdin.buffer]
else:
files = iter_files(argv[1:])
for file in files:
for line in file:
sys.stdout.buffer.write(line)
file.close()
if __name__ == "__main__":
main()
| mit | Python |
|
3b58283f613fc827e024c8d971d89c24fc2b3ed0 | Create knn.py | lingcheng99/kagge-digit-recognition | knn.py | knn.py | import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
#Read training data and split into train and test data
data=pd.read_csv('train.csv')
data1=data.values
X = data1[:, 1:]
y = np.ravel(data1[:, 0])  # first column of train.csv holds the labels
Xtrain,Xtest,ytrain,ytest=train_test_split(X,y,test_size=0.25)
#Run PCA and KNN
pca=PCA(n_components=50).fit(Xtrain)
Xtrain_reduced=pca.transform(Xtrain)
Xtest_reduced=pca.transform(Xtest)
knn=KNeighborsClassifier(n_neighbors=5,weights='distance',p=3)
knn.fit(Xtrain_reduced,ytrain)
pred=knn.predict(Xtest_reduced)
print("Classification report for classifier %s:\n%s\n"
% (knn, metrics.classification_report(ytest,pred)))
#Run prediction on test data and make submissions
test=pd.read_csv('test.csv')
test_reduced=pca.transform(test)
pred2=knn.predict(test_reduced)
pred2 = pd.DataFrame(pred2)
pred2['ImageId'] = pred2.index + 1
pred2 = pred2[['ImageId', 0]]
pred2.columns = ['ImageId', 'Label']
pred2.to_csv('pred2.csv', index=False)
| mit | Python |
|
1faa3c76d1c752de02149af34954ed538fe10fa1 | Add test | albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com | app/tests/test_data.py | app/tests/test_data.py | import unittest
from app import data
class TestProjects(unittest.TestCase):
def test_load(self) -> None:
projects = data.Projects.load()
self.assertNotEqual(projects.data, {})
self.assertIn('Python', projects.data)
self.assertIn('Git Browse', projects.data['Python'])
self.assertIn('description', projects.data['Python']['Git Browse'])
| mit | Python |
|
5813474651299998fb27c64c6d179a0a59bbe28c | Create otc.py | stqism/THE_KGB,KittyHawkIrc/core | otc.py | otc.py | def tick(a,b,c):
if a == 'help':
msg = '^otc {currency}, specify a 2nd currency for rates, add --last/high/low etc for that alone.'
return msg
import urllib2,json,StringIO
a = a.lower()
b = b.lower()
c = c.lower()
if b.startswith('-'):
c = b
b = 'usd'
if b == 'none':
b = 'usd'
btce = urllib2.Request('https://btc-e.com/api/2/' + a + '_' + b + '/ticker')
get = urllib2.urlopen(btce)
parse = get.read()
if parse == '{"error":"invalid pair"}':
b = 'btc'
btce = urllib2.Request('https://btc-e.com/api/2/' + a + '_' + b + '/ticker')
get = urllib2.urlopen(btce)
parse = get.read()
try:
ticker3 = "{" + parse.split('{',2)[2].split('}',2)[0] + "}".replace('"','\'').replace(':',':"').replace(',','",').replace('}','"}')
ticker2 = ticker3.replace(':',':"').replace(',','",')
ticker = json.loads(ticker2)
except:
return 'Unknown currency'
if c == 'none':
msg = 'BTC-E ' + a.upper() + b.upper() + ' ticker | High: ' + ticker['high'] + ', Low: ' + ticker['low'] + ', avg: ' + ticker['avg'] + ', Last: ' + ticker['last'] + ', Buy: ' + ticker['buy'] + ', Sell: ' + ticker['sell']
elif c.startswith('--'):
msg = ticker[c[2:]]
else:
msg = 'That flag does not exist'
return msg
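# Illustrative calls (hypothetical IRC-style handler input; 'none' stands for
# an omitted argument):
#   tick('btc', 'usd', 'none')    -> full BTC-E BTCUSD ticker line
#   tick('ltc', 'none', '--last') -> just the last LTC/USD price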
| mit | Python |
|
bf678628cf98b1c18a75f09fa15d26526ea0e3ac | Add gender choices fields | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0028_add_gender_fields.py | accelerator/migrations/0028_add_gender_fields.py | from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0027_add_gender_choices_object'),
]
operations = [
migrations.AddField(
model_name='entrepreneurprofile',
name='gender_self_description',
field=models.TextField(blank=True, default=''),
),
migrations.AddField(
model_name='entrepreneurprofile',
name='gender_identity',
field=models.ManyToManyField(
blank=True,
to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
),
migrations.AddField(
model_name='expertprofile',
name='gender_self_description',
field=models.TextField(blank=True, default=''),
),
migrations.AddField(
model_name='expertprofile',
name='gender_identity',
field=models.ManyToManyField(
blank=True,
to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
),
migrations.AddField(
model_name='memberprofile',
name='gender_self_description',
field=models.TextField(blank=True, default=''),
),
migrations.AddField(
model_name='memberprofile',
name='gender_identity',
field=models.ManyToManyField(
blank=True,
to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
),
]
| mit | Python |
|
bac06acb1e6255040f371232776f3da75fb9247a | Add data migration to populate preprint_doi_created field on existing published preprints where DOI identifier exists. Set to preprint date_published field. | baylee-d/osf.io,baylee-d/osf.io,erinspace/osf.io,cslzchen/osf.io,mattclark/osf.io,mfraezz/osf.io,cslzchen/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,icereval/osf.io,brianjgeiger/osf.io,felliott/osf.io,cslzchen/osf.io,TomBaxter/osf.io,felliott/osf.io,aaxelb/osf.io,adlius/osf.io,aaxelb/osf.io,felliott/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,sloria/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,leb2dg/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,mattclark/osf.io,chennan47/osf.io,laurenrevere/osf.io,pattisdr/osf.io,crcresearch/osf.io,TomBaxter/osf.io,icereval/osf.io,erinspace/osf.io,chennan47/osf.io,icereval/osf.io,binoculars/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,binoculars/osf.io,felliott/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,sloria/osf.io,mattclark/osf.io,pattisdr/osf.io,crcresearch/osf.io,binoculars/osf.io,cslzchen/osf.io,mfraezz/osf.io,adlius/osf.io,aaxelb/osf.io,leb2dg/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,leb2dg/osf.io,leb2dg/osf.io,chennan47/osf.io,adlius/osf.io,CenterForOpenScience/osf.io | osf/migrations/0069_auto_20171127_1119.py | osf/migrations/0069_auto_20171127_1119.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 17:19
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.models import PreprintService
logger = logging.getLogger(__name__)
def add_preprint_doi_created(apps, schema_editor):
"""
Data migration that makes preprint_doi_created equal to date_published for existing published preprints.
"""
null_preprint_doi_created = PreprintService.objects.filter(preprint_doi_created__isnull=True, date_published__isnull=False)
preprints_count = null_preprint_doi_created.count()
current_preprint = 0
logger.info('{} published preprints found with preprint_doi_created is null.'.format(preprints_count))
for preprint in null_preprint_doi_created:
current_preprint += 1
if preprint.get_identifier('doi'):
preprint.preprint_doi_created = preprint.date_published
preprint.save()
logger.info('Preprint ID {}, {}/{} preprint_doi_created field populated.'.format(preprint._id, current_preprint, preprints_count))
else:
logger.info('Preprint ID {}, {}/{} skipped because a DOI has not been created.'.format(preprint._id, current_preprint, preprints_count))
def reverse_func(apps, schema_editor):
"""
Reverses data migration. Sets preprint_doi_created field back to null.
"""
preprint_doi_created_not_null = PreprintService.objects.filter(preprint_doi_created__isnull=False)
preprints_count = preprint_doi_created_not_null.count()
current_preprint = 0
logger.info('Reversing preprint_doi_created migration.')
for preprint in preprint_doi_created_not_null:
current_preprint += 1
preprint.preprint_doi_created = None
preprint.save()
logger.info('Preprint ID {}, {}/{} preprint_doi_created field set to None.'.format(preprint._id, current_preprint, preprints_count))
class Migration(migrations.Migration):
dependencies = [
('osf', '0068_preprintservice_preprint_doi_created'),
]
operations = [
migrations.RunPython(add_preprint_doi_created, reverse_func)
]
| apache-2.0 | Python |
|
167a6497d79a4a18badd5ea85a87e7eefcd02696 | Add init file to the root acceptance tests folder | telefonicaid/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,telefonicaid/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,telefonicaid/fiware-pep-steelskin,telefonicaid/fiware-pep-steelskin | test/acceptance/__init__.py | test/acceptance/__init__.py | # -*- coding: utf-8 -*-
"""
Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
This file is part of fiware-orion-pep
fiware-orion-pep is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
fiware-orion-pep is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with fiware-orion-pep.
If not, see http://www.gnu.org/licenses/.
For those usages not covered by the GNU Affero General Public License
please contact with::[iot_support@tid.es]
"""
__author__ = 'Jon Calderin Goñi <jon.caldering@gmail.com>'
import os
"""
Make sure the logs path exists and create it otherwise.
"""
if not os.path.exists('logs'):
os.makedirs('logs') | agpl-3.0 | Python |
|
d290b3b2cc15a3bab907ed3847da709ab31edace | disable unpredictable tests | looker/sentry,beeftornado/sentry,jean/sentry,gencer/sentry,gencer/sentry,looker/sentry,beeftornado/sentry,jean/sentry,ifduyue/sentry,looker/sentry,beeftornado/sentry,JackDanger/sentry,gencer/sentry,mvaled/sentry,mvaled/sentry,JamesMura/sentry,jean/sentry,JamesMura/sentry,JackDanger/sentry,JamesMura/sentry,JackDanger/sentry,ifduyue/sentry,jean/sentry,JamesMura/sentry,mvaled/sentry,looker/sentry,gencer/sentry,ifduyue/sentry,mvaled/sentry,jean/sentry,looker/sentry,mvaled/sentry,ifduyue/sentry,mvaled/sentry,ifduyue/sentry,gencer/sentry,JamesMura/sentry | tests/acceptance/test_api.py | tests/acceptance/test_api.py | from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class ApiTokensTest(AcceptanceTestCase):
def setUp(self):
super(ApiTokensTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
self.path = '/api/'
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot('api tokens - no tokens')
# self.browser.click('.ref-create-token')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api tokens - new token')
# self.browser.click('.btn-primary')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api tokens - single token')
class ApiApplicationTest(AcceptanceTestCase):
def setUp(self):
super(ApiApplicationTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
self.path = '/api/applications/'
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot('api applications - no applications')
# self.browser.click('.ref-create-application')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api applications - new application')
# self.browser.click('.btn-primary')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api applications - single application')
| from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class ApiTokensTest(AcceptanceTestCase):
def setUp(self):
super(ApiTokensTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
self.path = '/api/'
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot('api tokens - no tokens')
self.browser.click('.ref-create-token')
self.browser.wait_until_not('.loading')
self.browser.snapshot('api tokens - new token')
self.browser.click('.btn-primary')
self.browser.wait_until_not('.loading')
self.browser.snapshot('api tokens - single token')
class ApiApplicationTest(AcceptanceTestCase):
def setUp(self):
super(ApiApplicationTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
self.path = '/api/applications/'
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot('api applications - no applications')
self.browser.click('.ref-create-application')
self.browser.wait_until_not('.loading')
self.browser.snapshot('api applications - new application')
self.browser.click('.btn-primary')
self.browser.wait_until_not('.loading')
self.browser.snapshot('api applications - single application')
| bsd-3-clause | Python |
8fa776fd2fa63a44cb048a39fe7359ee9366c5e8 | Add basic Processor tests | Hero1378/bucky,trbs/bucky,dimrozakis/bucky,dimrozakis/bucky,ewdurbin/bucky,trbs/bucky,JoseKilo/bucky,ewdurbin/bucky,jsiembida/bucky3,JoseKilo/bucky,Hero1378/bucky | tests/003-test-processor.py | tests/003-test-processor.py | import time
import random
import multiprocessing
from functools import wraps
try:
import queue
except ImportError:
import Queue as queue
import t
import bucky.processor
import bucky.cfg as cfg
cfg.debug = True
def processor(func):
@wraps(func)
def run():
inq = multiprocessing.Queue()
outq = multiprocessing.Queue()
proc = bucky.processor.CustomProcessor(inq, outq, cfg)
proc.start()
func(inq, outq, proc)
inq.put(None)
dead = False
for i in range(5):
if not proc.is_alive():
dead = True
break
time.sleep(0.1)
if not dead:
raise RuntimeError("Server didn't die.")
return run
def send_get_data(indata, inq, outq):
for sample in indata:
inq.put(sample)
while True:
try:
sample = outq.get(True, 1)
except queue.Empty:
break
yield sample
def identity(host, name, val, time):
return host, name, val, time
@t.set_cfg("processor", identity)
@processor
def test_start_stop(inq, outq, proc):
assert proc.is_alive(), "Processor not alive."
inq.put(None)
time.sleep(0.5)
assert not proc.is_alive(), "Processor not killed by putting None in queue"
@t.set_cfg("processor", identity)
@processor
def test_plumbing(inq, outq, proc):
data = []
times = 100
for i in range(times):
host = "tests.host-%d" % i
name = "test-plumbing-%d" % i
value = i
timestamp = int(time.time() + i)
data.append((host, name, value, timestamp))
i = 0
for sample in send_get_data(data, inq, outq):
t.eq(sample, data[i])
i += 1
t.eq(i, times)
def filter_even(host, name, val, timestamp):
if not val % 2:
return None
return host, name, val, timestamp
@t.set_cfg("processor", filter_even)
@processor
def test_filter(inq, outq, proc):
data = []
times = 100
for i in range(times):
host = "tests.host-%d" % i
name = "test-filter-%d" % i
timestamp = int(time.time() + i)
data.append((host, name, 0, timestamp))
data.append((host, name, 1, timestamp))
i = 0
for sample in send_get_data(data, inq, outq):
t.eq(sample[2], 1)
i += 1
t.eq(i, times)
| apache-2.0 | Python |
|
0b185bb6a30cb7c9b02c80051a8426dc736da3d6 | Add sample WSGI app | locke105/mclib | examples/wsgi.py | examples/wsgi.py |
import cgi
import json
from wsgiref import simple_server
import falcon
from mclib import mc_info
class MCInfo(object):
def on_get(self, req, resp):
host = req.get_param('host', required=True)
        port = req.get_param_as_int('port', min=1024,
                                    max=65535)  # 65535 is the highest valid port
try:
if port is not None:
info = mc_info.get_info(host=host,
port=port)
else:
info = mc_info.get_info(host=host)
except Exception:
raise Exception('Couldn\'t retrieve info.')
if '.json' in req.uri:
resp.body = self.get_json(info)
return
preferred = req.client_prefers(['application/json', 'text/html'])
if 'html' in preferred:
resp.content_type = 'text/html'
resp.body = self.get_html(info)
else:
resp.body = self.get_json(info)
def get_html(self, info):
html = """<body>
<style>
table,th,td
{
border:1px solid black;
border-collapse:collapse
}
th,td
{
padding: 5px
}
</style>
<table>
"""
for k,v in info.iteritems():
items = {'key': cgi.escape(k)}
if isinstance(v, basestring):
items['val'] = cgi.escape(v)
else:
items['val'] = v
html = html + '<tr><td>%(key)s</td><td>%(val)s</td></tr>' % items
html = html + '</table></body>'
return html
def get_json(self, info):
return json.dumps(info)
app = falcon.API()
mcinfo = MCInfo()
app.add_route('/mcinfo', mcinfo)
app.add_route('/mcinfo.json', mcinfo)
if __name__ == '__main__':
httpd = simple_server.make_server('0.0.0.0', 3000, app)
httpd.serve_forever()
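# Illustrative query (hostname is a placeholder) once the server is running:
#   curl 'http://localhost:3000/mcinfo?host=mc.example.com&port=25565'
# Appending .json to the route, or preferring application/json in the Accept
# header, selects the JSON output path above.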
| apache-2.0 | Python |
|
b097075f7606563fc8ae80274e73b74dedd8129f | prepare a new folder "resources" for json files to replace python dynamic_resources | muslih/alfanous,muslih/alfanous,muslih/alfanous,muslih/alfanous,muslih/alfanous,muslih/alfanous,muslih/alfanous | src/alfanous/Data.py | src/alfanous/Data.py | '''
Created on Jun 15, 2012
@author: assem
'''
class Configs:
pass
class Indexes:
pass
class Ressources:
pass
| agpl-3.0 | Python |
|
b171eb0c77f2d68051b48145f4e49275ed6860b9 | Add tests for signup code exists method | pinax/django-user-accounts,pinax/django-user-accounts | account/tests/test_models.py | account/tests/test_models.py | from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from django.contrib.auth.models import User
from account.models import SignupCode
class SignupCodeModelTestCase(TestCase):
def test_exists_no_match(self):
code = SignupCode(email='foobar@example.com', code='FOOFOO')
code.save()
self.assertFalse(SignupCode.exists(code='BARBAR'))
self.assertFalse(SignupCode.exists(email='bar@example.com'))
self.assertFalse(SignupCode.exists(email='bar@example.com', code='BARBAR'))
self.assertFalse(SignupCode.exists())
def test_exists_email_only_match(self):
code = SignupCode(email='foobar@example.com', code='FOOFOO')
code.save()
self.assertTrue(SignupCode.exists(email='foobar@example.com'))
def test_exists_code_only_match(self):
code = SignupCode(email='foobar@example.com', code='FOOFOO')
code.save()
self.assertTrue(SignupCode.exists(code='FOOFOO'))
self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))
def test_exists_email_match_code_mismatch(self):
code = SignupCode(email='foobar@example.com', code='FOOFOO')
code.save()
self.assertTrue(SignupCode.exists(email='foobar@example.com', code='BARBAR'))
def test_exists_code_match_email_mismatch(self):
code = SignupCode(email='foobar@example.com', code='FOOFOO')
code.save()
self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))
def test_exists_both_match(self):
code = SignupCode(email='foobar@example.com', code='FOOFOO')
code.save()
self.assertTrue(SignupCode.exists(email='foobar@example.com', code='FOOFOO'))
| mit | Python |
|
f5140f87e0e4326fe189b2f5f3ff3ac90f8db5c8 | Add new heroku_worker.py to run as a Heroku worker process | mattstibbs/blockbuster-server,mattstibbs/blockbuster-server | blockbuster/heroku_worker.py | blockbuster/heroku_worker.py | import redis
from rq import Worker, Queue, Connection
import os
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:32769/1')
print(REDIS_URL)
listen = ['default']
conn = redis.from_url(REDIS_URL)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
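# Illustrative deployment note: on Heroku this process would typically be
# declared in the Procfile, e.g. (exact path depends on the app layout):
#   worker: python blockbuster/heroku_worker.py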
| mit | Python |
|
0722624244d107b19a006f07fd884d47597e4eb1 | Add utility class to filter text through external program | guillermooo/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle,guillermooo/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle,guillermooo/dart-sublime-bundle,guillermooo/dart-sublime-bundle | lib/filter.py | lib/filter.py | from subprocess import Popen
from subprocess import PIPE
from subprocess import TimeoutExpired
import threading
from Dart import PluginLogger
from Dart.lib.plat import supress_window
_logger = PluginLogger(__name__)
class TextFilter(object):
'''Filters text through an external program (sync).
'''
def __init__(self, args, timeout=10):
self.args = args
self.timeout = timeout
# Encoding the external program likes to receive.
self.in_encoding = 'utf-8'
# Encoding the external program will emit.
self.out_encoding = 'utf-8'
self._proc = None
def encode(self, text):
        return text.encode(self.in_encoding)
def decode(self, encoded_bytes):
return encoded_bytes.decode(self.out_encoding)
def clean(self, text):
return text.replace('\r', '').rstrip()
def _start(self):
try:
self._proc = Popen(self.args,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
startupinfo=supress_window())
except OSError as e:
_logger.error('while starting text filter program: %s', e)
return
def filter(self, input_text):
self._start()
try:
in_bytes = input_text.encode(self.in_encoding)
out_bytes, err_bytes = self._proc.communicate(in_bytes,
self.timeout)
return self.clean(self.decode(out_bytes))
except TimeoutExpired:
_logger.debug('text filter program response timed out')
return None
except Exception as e:
_logger.error('while running TextFilter: %s', e)
return None
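# Illustrative usage sketch (command and input are hypothetical):
#   tf = TextFilter(['sort'])
#   result = tf.filter('b\na\n')  # -> 'a\nb' (output is clean()-ed)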
| bsd-3-clause | Python |
|
c7da0ed13838150f0276c4c9f425390822b5b43b | Add serializers for API models. | rcutmore/vinotes-api,rcutmore/vinotes-api | vinotes/apps/api/serializers.py | vinotes/apps/api/serializers.py | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Note, Trait, Wine, Winery
class WinerySerializer(serializers.ModelSerializer):
class Meta:
model = Winery
fields = ('id', 'name')
class WineSerializer(serializers.ModelSerializer):
class Meta:
model = Wine
fields = ('id', 'winery', 'name', 'vintage')
class TraitSerializer(serializers.ModelSerializer):
class Meta:
model = Trait
fields = ('id', 'name')
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = ('id', 'taster', 'tasted', 'wine', 'color_traits',
'nose_traits', 'taste_traits', 'finish_traits', 'rating')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'notes') | unlicense | Python |
|
383c67da4729886602227b715f65390427ccd8bc | Create w3_1.py | s40523239/2016fallcp_hw,s40523239/2016fallcp_hw,s40523239/2016fallcp_hw | w3_1.py | w3_1.py | print ("Hello World!")
| agpl-3.0 | Python |
|
66afbaab9abe51a83d6ea9765b7b8b70d045115e | Create question2.py | pythonzhichan/DailyQuestion,pythonzhichan/DailyQuestion | dingshubo/question2.py | dingshubo/question2.py | #_*_ coding:utf-8 _*_
#!/user/bin/python
import random
number_random = random.randint(1,100)
for chance in range(5):  # the player gets 5 chances
    number_player = input('Enter an integer between 1 and 100: ')
    if number_player == number_random:
        print('Congratulations, you guessed it!')
        break
    elif number_player > number_random:
        print('Too high')
    else:
        print('Too low')
    print('You have %d chances left' % (4 - chance))
else:
    # the loop finished without a correct guess
    print('The correct answer was: %s' % number_random)
| mit | Python |
|
3189cd139b868d74caf35aa5b7a80f748f21c231 | add tool to process brian's files | akrherz/idep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/dep,akrherz/idep,akrherz/idep | scripts/import/import_brian_files.py | scripts/import/import_brian_files.py | import glob
import os
os.chdir("c")
for filename in glob.glob("*"):
tokens = filename.split("_")
huc12 = tokens[1]
typ = tokens[2].split(".")[1]
newfn = "/i/%s/%s/%s" % (typ, huc12, filename)
os.rename(filename, newfn) | mit | Python |
|
e73b5fadbcff141fab2478954345ebaac22d8e63 | add K-means | LeoZ123/Machine-Learning-Practice | K-means/K-means.py | K-means/K-means.py | '''
Created on Apr 30, 2017
@author: Leo Zhong
'''
import numpy as np
# Function: K Means
# -------------
# K-Means is an algorithm that takes in a dataset and a constant
# k and returns k centroids (which define clusters of data in the
# dataset which are similar to one another).
def kmeans(X, k, maxIt):
#get col and row
numPoints, numDim = X.shape
dataSet = np.zeros((numPoints, numDim + 1))
dataSet[:, :-1] = X
# Initialize centroids randomly
centroids = dataSet[np.random.randint(numPoints, size = k), :]
#Randomly assign labels to initial centorid
centroids[:, -1] = range(1, k +1)
# Initialize book keeping vars.
iterations = 0
oldCentroids = None
# Run the main k-means algorithm
while not shouldStop(oldCentroids, centroids, iterations, maxIt):
print ("iteration: \n", iterations)
print ("dataSet: \n", dataSet)
print ("centroids: \n", centroids)
# Save old centroids for convergence test. Book keeping.
oldCentroids = np.copy(centroids)
iterations += 1
# Assign labels to each datapoint based on centroids
updateLabels(dataSet, centroids)
# Assign centroids based on datapoint labels
centroids = getCentroids(dataSet, k)
# We can get the labels too by calling getLabels(dataSet, centroids)
return dataSet
# Function: Should Stop
# -------------
# Returns True or False if k-means is done. K-means terminates either
# because it has run a maximum number of iterations OR the centroids
# stop changing.
def shouldStop(oldCentroids, centroids, iterations, maxIt):
if iterations > maxIt:
return True
return np.array_equal(oldCentroids, centroids)
# Function: Get Labels
# -------------
# Update a label for each piece of data in the dataset.
def updateLabels(dataSet, centroids):
    # For each element in the dataset, choose the closest centroid.
# Make that centroid the element's label.
numPoints, numDim = dataSet.shape
for i in range(0, numPoints):
dataSet[i, -1] = getLabelFromClosestCentroid(dataSet[i, :-1], centroids)
def getLabelFromClosestCentroid(dataSetRow, centroids):
    label = centroids[0, -1]
minDist = np.linalg.norm(dataSetRow - centroids[0, :-1])
for i in range(1 , centroids.shape[0]):
dist = np.linalg.norm(dataSetRow - centroids[i, :-1])
if dist < minDist:
minDist = dist
label = centroids[i, -1]
print ("minDist:", minDist)
return label
# Function: Get Centroids
# -------------
# Returns k random centroids, each of dimension n.
def getCentroids(dataSet, k):
# Each centroid is the geometric mean of the points that
# have that centroid's label. Important: If a centroid is empty (no points have
# that centroid's label) you should randomly re-initialize it.
result = np.zeros((k, dataSet.shape[1]))
for i in range(1, k + 1):
oneCluster = dataSet[dataSet[:, -1] == i, :-1]
result[i - 1, :-1] = np.mean(oneCluster, axis = 0)
result[i - 1, -1] = i
return result
x1 = np.array([1, 1])
x2 = np.array([2, 1])
x3 = np.array([4, 3])
x4 = np.array([5, 4])
testX = np.vstack((x1, x2, x3, x4))
result = kmeans(testX, 2, 10)
print ("final result:")
print (result)
| mit | Python |
|
7e17363eaf8d17f0d595ca5199e59a51c7b1df65 | Add the core social_pipeline. | WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow | oneflow/core/social_pipeline.py | oneflow/core/social_pipeline.py | # -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
It provides {python,django}-social-auth pipeline helpers.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
# from constance import config
# from django.shortcuts import redirect
from social_auth.backends.facebook import FacebookBackend
from social_auth.backends.twitter import TwitterBackend
from social_auth.backends import google
from models import (
TwitterAccount,
# FacebookAccount, FacebookFeed,
)
LOGGER = logging.getLogger(__name__)
def check_feeds(social_user, user, details, request, response, backend,
is_new=False, *args, **kwargs):
""" Create Accounts & feeds associated with social networks. """
try:
if isinstance(backend, FacebookBackend):
pass
elif isinstance(backend, google.GoogleOAuth2Backend):
pass
elif isinstance(backend, TwitterBackend):
TwitterAccount.check_social_user(social_user, user, backend)
except:
LOGGER.exception(u'Could not check feeds for user %s from '
u'backend %s.', user, social_user)
| agpl-3.0 | Python |
|
ee533a5e2a4eff99641383741e1cbe8e57c43e1f | add typing stub/compat package | charlievieth/GoSubl,charlievieth/GoSubl | gosubl/typing.py | gosubl/typing.py | try:
# ST builds >= 4000
from mypy_extensions import TypedDict
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
from typing_extensions import Protocol
except ImportError:
# ST builds < 4000
def _make_type(name: str) -> '_TypeMeta':
return _TypeMeta(name, (Type,), {}) # type: ignore
class _TypeMeta(type):
def __getitem__(self, args: 'Any') -> 'Any':
if not isinstance(args, tuple):
args = (args,)
name = '{}[{}]'.format(
str(self),
', '.join(map(str, args))
)
return _make_type(name)
def __str__(self) -> str:
return self.__name__
class Type(metaclass=_TypeMeta): # type: ignore
pass
class TypedDict(Type, dict): # type: ignore
def __init__(*args, **kwargs) -> None: # type: ignore
pass
class Any(Type): # type: ignore
pass
class Callable(Type): # type: ignore
pass
class Dict(Type): # type: ignore
pass
class Generator(Type): # type: ignore
pass
class IO(Type): # type: ignore
pass
class Iterable(Type): # type: ignore
pass
class Iterator(Type): # type: ignore
pass
class List(Type): # type: ignore
pass
class Mapping(Type): # type: ignore
pass
class Optional(Type): # type: ignore
pass
class Set(Type): # type: ignore
pass
class Tuple(Type): # type: ignore
pass
class Union(Type): # type: ignore
pass
Protocol = object # type: ignore
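# Illustrative check (hypothetical): on both old and new Sublime Text builds,
# annotations such as the following evaluate without error at runtime:
#   def find(needle: str, haystack: List[str]) -> Optional[int]: ...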
| mit | Python |
|
2761e3bfd8d2c8281db565e54f6e3ea687bd5663 | add backfill problem_id script | stopstalk/stopstalk-deployment,stopstalk/stopstalk-deployment,stopstalk/stopstalk-deployment,stopstalk/stopstalk-deployment,stopstalk/stopstalk-deployment | private/scripts/extras/backfill_problem_id.py | private/scripts/extras/backfill_problem_id.py | """
Copyright (c) 2015-2019 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
ptable = db.problem
stable = db.submission
links = db(ptable).select(ptable.id, ptable.link)
plink_to_id = dict([(x.link, x.id) for x in links])
BATCH_SIZE = 25000
for i in xrange(10000):
rows = db(stable).select(limitby=(i * BATCH_SIZE, (i + 1) * BATCH_SIZE))
    if not rows:  # no submissions left to scan
        break
    print rows.first().id, rows.last().id,
updated = 0
for srecord in rows:
if srecord.problem_id is None and \
srecord.problem_link in plink_to_id:
srecord.update_record(problem_id=plink_to_id[srecord.problem_link])
updated += 1
if updated > 0:
db.commit()
time.sleep(0.1)
print "updated", updated
else:
print "no updates"
| mit | Python |
|
a3de0337f6e3511cc3381f92f7bbc384d7667dfd | Create xmas.py | sdlwdr/misc | xmas.py | xmas.py | gifts=['A Partridge in a Pear Tree', 'Two Turtle Doves, and', 'Three French Hens', 'Four Calling Birds', 'Five Golden Rings', 'Six Geese-a-Laying', 'Seven Swans-a-Swimming', 'Eight Maids-a-Milking', 'Nine Ladies Dancing', 'Ten Lords-a-Leaping', 'Eleven Pipers Piping', 'Twelve Drummers Drumming']
ordinal=['st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th', 'th', 'th', 'th']
for day in range(12):
print('On the ' + str(day+1) + str(ordinal[day]) + ' day of Christmas, my true love sent to me...')
gift=day
while gift >= 0:
print(str(gifts[gift]))
gift-=1
print('\n')
| mit | Python |
|
8fa4888dbf82d225f52b6df347372a0381c08237 | Add __main__.py for running python -m grip. | mgoddard-pivotal/grip,jbarreras/grip,ssundarraj/grip,mgoddard-pivotal/grip,joeyespo/grip,ssundarraj/grip,jbarreras/grip,joeyespo/grip | grip/__main__.py | grip/__main__.py | """\
Grip
----
Render local readme files before sending off to Github.
:copyright: (c) 2014 by Joe Esposito.
:license: MIT, see LICENSE for more details.
"""
from command import main
if __name__ == '__main__':
main()
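# With this file in place, `python -m grip` is expected to behave like the
# `grip` console script (illustrative note, not part of the module).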
| mit | Python |
|
95874a5e06ff70d1cbea49321549beee5cc5abba | Create an example of storing units in HDF5 | h5py/h5py,h5py/h5py,h5py/h5py | examples/store_and_retrieve_units_example.py | examples/store_and_retrieve_units_example.py | """
Author: Daniel Berke, berke.daniel@gmail.com
Date: October 27, 2019
Requirements: h5py>=2.10.0, unyt>=v2.4.0
Notes: This short example script shows how to save unit information attached
to a `unyt_array` using `attrs` in HDF5, and recover it upon reading the file.
It uses the Unyt package (https://github.com/yt-project/unyt) because that's
what I'm familiar with, but presumably similar options exist for Pint and
astropy.units.
"""
import h5py
import tempfile
import unyt as u
# Set up a temporary file for this example.
tf = tempfile.TemporaryFile()
f = h5py.File(tf, 'a')
# Create some mock data with moderately complicated units (this is the
# dimensional representation of Joules of energy).
test_data = [1, 2, 3, 4, 5] * u.kg * ( u.m / u.s ) ** 2
print(test_data.units)
# kg*m**2/s**2
# Create a data set to hold the numerical information:
f.create_dataset('stored data', data=test_data)
# Save the units information as a string in `attrs`.
f['stored data'].attrs['units'] = str(test_data.units)
# Now recover the data, using the saved units information to reconstruct the
# original quantities.
reconstituted_data = u.unyt_array(f['stored data'],
units=f['stored data'].attrs['units'])
print(reconstituted_data.units)
# kg*m**2/s**2
assert reconstituted_data.units == test_data.units
| bsd-3-clause | Python |
|
4fe50fda289be7db3fb96450e713eb8f1a815026 | Add weighted linear algorithm | swarmer/autoscaler | autoscaler/server/scaling/algorithms/weighted.py | autoscaler/server/scaling/algorithms/weighted.py | import math
from autoscaler.server.request_history import RequestHistory
from autoscaler.server.scaling.utils import parse_interval
class WeightedScalingAlgorithm:
def __init__(self, algorithm_config):
self.interval_seconds = parse_interval(
algorithm_config['interval']
)
self.requests_per_instance_interval = (
algorithm_config['requests_per_instance_interval']
)
self.weights = algorithm_config['weights']
def get_instance_count(self, request_history: RequestHistory):
intervals = request_history.get_last_intervals(
self.interval_seconds, len(self.weights)
)
normalized_weights = self._normalized_weights(self.weights)
weighted_request_count = sum(
len(interval) * weight
for weight, interval in zip(normalized_weights, intervals)
)
return max(1, math.ceil(
weighted_request_count / self.requests_per_instance_interval)
)
@staticmethod
def _normalized_weights(weights):
weight_sum = sum(weights)
return [weight / weight_sum for weight in weights]
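# Illustrative usage sketch (config values are made up; the 'interval' string
# format is whatever parse_interval accepts):
#   algorithm = WeightedScalingAlgorithm({
#       'interval': '60s',
#       'requests_per_instance_interval': 100,
#       'weights': [3, 2, 1],
#   })
#   instances = algorithm.get_instance_count(request_history)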
| mit | Python |
|
ca43479fc10505b04ec8861de074f25c80c6f5e1 | add rhythm description module | jvbalen/catchy,jvbalen/catchy | rhythm_features.py | rhythm_features.py |
from __future__ import division, print_function
import os
import sys
import numpy as np
import utils
onsets_dir = ''
beats_dir = ''
def compute_and_write(data_dir, track_list=None, features=None):
"""Compute frame-based features for all audio files in a folder.
Args:
data_dir (str): where to write features
        track_list (str or None): list of file ids. Set to None to infer from
            files in onsets_dir and beats_dir.
features (dict): dictionary with (unique) feature names as keys and
tuples as values, each containing a feature extraction function and a
parameter dictionary.
Feature extraction functions can be any function that returns one
or more 1d or 2d-arrays that share their first dimension.
Required global variables:
beats_dir (str): where to find beat data
onsets_dir (str): where to find onset data
"""
if track_list is None:
        track_list = [filename.split('.')[0] for filename in os.listdir(onsets_dir)]
if features is None:
features = {'ioihist': (get_ioi_hist, {})}
for track_id in track_list:
print("Computing features for track {}...".format(track_id))
for feature in features:
# run feature function
func, params = features[feature]
X = func(track_id, **params)
# normalize (!) and flatten
X = X.flatten() / np.sum(X)
# write
utils.write_feature(X, [data_dir, feature, track_id])
def get_ioi_hist(track_id, min_length = -7, max_length = 0, step=1):
"""Compute a IOI histogram, with bins logarithmically spaced between
`min_length` (def: -7) and `max_length` (0), with step `step`.
"""
t, ioi = get_norm_ioi(track_id)
log_ioi = np.log2(ioi)
halfstep = step / 2.0
    nbins = int((max_length - min_length) / step) + 1
    binedges = np.linspace(min_length - halfstep, max_length + halfstep, nbins + 1)
ioi_hist, _ = np.histogram(log_ioi, binedges)
ioi_hist = ioi_hist / np.sum(ioi_hist)
return ioi_hist
def get_beats(track_id):
"""Read beat data from file beats_dir + track_id + '.csv'.
File should contain a time column followed by one column of
beat intervals.
"""
beats_file = os.path.join(beats_dir, track_id + '.csv')
t, beat_intervals = utils.read_feature(beats_file, time=True)
return t, beat_intervals
def get_onsets(track_id):
"""Read ioi data from file onsets_dir + track_id + '.csv'.
File should contain a time column followed by one column of
inter-onset intervals.
"""
onsets_file = os.path.join(onsets_dir, track_id + '.csv')
t, ioi = utils.read_feature(onsets_file, time=True)
return t, ioi
# TODO
def get_norm_ioi(track_id):
pass
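# Illustrative use (paths are placeholders; the module-level *_dir globals
# must point at real data before calling):
#   onsets_dir = '/data/onsets'
#   beats_dir = '/data/beats'
#   compute_and_write('/data/features', track_list=['track1'])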
if __name__ == '__main__':
compute_and_write(sys.argv[1], sys.argv[2]) | mit | Python |
|
a726625e13ac08d0b6c2c686de476b6e78bc0f48 | Add unit test for _skeleton | MichelJuillard/dlstats,mmalter/dlstats,Widukind/dlstats,MichelJuillard/dlstats,mmalter/dlstats,Widukind/dlstats,MichelJuillard/dlstats,mmalter/dlstats | dlstats/fetchers/test__skeleton.py | dlstats/fetchers/test__skeleton.py | import unittest
from datetime import datetime
from _skeleton import Dataset
class DatasetTestCase(unittest.TestCase):
def test_full_example(self):
self.assertIsInstance(Dataset(provider='Test provider',name='GDP',dataset_code='nama_gdp_fr',dimension_list=[{'name':'COUNTRY','values':[('FR','France'),('DE','Germany')]}],doc_href='rasessr',last_update=datetime(2014,12,2)),Dataset)
def test_empty_doc_href(self):
self.assertIsInstance(Dataset(provider='Test provider',name='GDP',dataset_code='nama_gdp_fr',dimension_list=[{'name':'COUNTRY','values':[('FR','France'),('DE','Germany')]}],last_update=datetime(2014,12,2)),Dataset)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | Python |
|
e54c82c336827c1fc835837006885c245a05e5cb | Add html stripper for announcements | karenang/ivle-bot,karen/ivle-bot | html_stripper.py | html_stripper.py | from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False
        self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = HTMLStripper()
s.feed(html)
return s.get_data() | mit | Python |
|
20830e9fb2785eda94bf9e7c0dab70d476bc82b4 | Add `sample_settings.py` | avinassh/Reddit-GoodReads-Bot | sample_settings.py | sample_settings.py | # Rename this file to `settings.py` in deployment
# supported_subreddits = 'india'
supported_subreddits = 'india+indianbooks'
user_agent = ('Goodreads, v0.1. Gives info of the book whenever goodreads'
'link to a book is posted. (by /u/avinassh)')
scopes = ['identity', 'submit', 'privatemessages', 'read']
be_gentle_to_reddit = True
# reddit app
app_key = 'K...q'
app_secret = 'y...i'
# bot account
access_token = '3...R'
refresh_token = '3...m'
# good reads
goodreads_api_key = '5...v'
goodreads_api_secret = 'T...4'
| mit | Python |
|
638c6383acf4431c95327fd0cbdb535e115e027d | Create admin util for user management. | manylabs/flow-server,manylabs/flow-server,manylabs/flow-server | flow-admin.py | flow-admin.py | #!/usr/bin/env python
#
# To ensure you can import rhizo-server modules, set PYTHONPATH
# to point to the rhizo-server base dir.
# E.g.
# export PYTHONPATH=/home/user/rhizo-server/
#
from optparse import OptionParser
from main.users.auth import create_user
from main.users.models import User, OrganizationUser
from main.resources.resource_util import find_resource, _create_folders
from main.app import db
if __name__ == '__main__':
parser = OptionParser()
parser.add_option( '-c',
'--create-user',
dest='flow_user_spec',
help='Create flow user specified in the format email:username:password:fullname',
default='')
parser.add_option( '-d',
'--delete-user',
dest='delete_username',
help='Delete flow user specified by username',
default='')
(options, args) = parser.parse_args()
if options.flow_user_spec:
parts = options.flow_user_spec.split(':')
email = parts[0]
username = parts[1]
password = parts[2]
fullname = parts[3]
assert '.' in email and '@' in email
#
# Create user
#
print("Creating user %s" % (username))
user_id = create_user( email,
username,
password,
fullname,
User.STANDARD_USER)
#
# Add user to flow organization
#
print("Creating organization user.")
org_user = OrganizationUser()
org_user.organization_id = find_resource('/testing').id
org_user.user_id = user_id
org_user.is_admin = False
db.session.add(org_user)
db.session.commit()
#
# Create a folder for this user to store their programs
#
student_folder = 'testing/student-folders/%s' % (username)
print("Creating student folder %s." % (student_folder))
_create_folders(student_folder)
print('Created flow user: %s' % (email))
elif options.delete_username:
#
# Delete the specified user by username
#
username = options.delete_username
user = User.query.filter(User.user_name == username).first()
if user is None:
print("No such user %s." % (username))
exit(1)
#
# Delete user folder
#
student_folder = find_resource('/testing/student-folders/%s' % (username))
if student_folder is not None:
print("Deleting student folder %s." % (student_folder.name))
db.session.delete(student_folder)
db.session.commit()
else:
print("No student folder to delete.")
#
# Delete organization user
#
org_id = find_resource('/testing').id
org_user = OrganizationUser.query.filter(
OrganizationUser.organization_id == org_id,
OrganizationUser.user_id == user.id ).first()
if org_user is not None:
print("Deleting organization user.")
db.session.delete(org_user)
db.session.commit()
else:
print("No organization user to delete.")
#
# Now delete the user
#
db.session.delete(user)
db.session.commit()
print('Deleted flow user: %s.' % (username))
| mit | Python |
|
0da1d2edc0f2a01d90cfc7cbf2bb4d37d1cc58d9 | Add examples from JModelica User's Manual (1.17.0) | michael-okeefe/soep-sandbox | src/ast_example.py | src/ast_example.py | # Import library for path manipulations
import os.path
# Import the JModelica.org Python packages
import pymodelica
from pymodelica.compiler_wrappers import ModelicaCompiler
# Import numerical libraries
import numpy as N
import ctypes as ct
import matplotlib.pyplot as plt
# Import JPype
import jpype
# Create a reference to the java package 'org'
org = jpype.JPackage('org')
# Create a compiler and compiler target object
mc = ModelicaCompiler()
# Build trees as if for an FMU or Model Exchange v 1.0
target = mc.create_target_object("me", "1.0")
# Don't parse the file if it has already been parsed
try:
source_root.getProgramRoot()
except:
# Parse the file CauerLowPassAnalog.mo and get the root node
# of the AST
model = mc.get_modelicapath() + "\\Modelica"
source_root = mc.parse_model(model)
# Don't load the standard library if it is already loaded
try:
modelica.getName().getID()
except NameError, e:
# Load the Modelica standard library and get the class
# declaration AST node corresponding to the Modelica
# package.
modelica = source_root.getProgram().getLibNode(0). \
getStoredDefinition().getElement(0)
def count_classes(class_decl, depth):
"""
Count the number of classes hierarchically contained
in a class declaration.
"""
# get an iterator over all local classes using the method
# ClassDecl.classes() which returns a Java Iterable object
# over ClassDecl objects
local_classes = class_decl.classes().iterator()
num_classes = 0
# Loop over all local classes
while local_classes.hasNext():
# Call count_classes recursively for all local classes
# (including the contained class itself)
num_classes += 1 + count_classes(local_classes.next(), depth + 1)
# If the class declaration corresponds to a package, print
# the number of hierarchically contained classes
if class_decl.isPackage() and depth <= 1:
print("The package %s has %d hierarchically contained classes"%(
class_decl.qualifiedName(), num_classes))
# Return the number of hierarchically contained classes
return num_classes
# Call count_classes for 'Modelica'
num_classes = count_classes(modelica, 0)
try:
filter_source.getProgramRoot()
except:
filter_source = mc.parse_model("CauerLowPassAnalog.mo")
# Don't instantiate if instance has been computed already
try:
filter_instance.components()
except:
# Retrieve the node
filter_instance = mc.instantiate_model(
filter_source, "CauerLowPassAnalog", target)
def dump_inst_ast(inst_node, indent, fid):
"""
Pretty print an instance node, including its merged environment.
"""
# Get the merged environment of an instance node
env = inst_node.getMergedEnvironment()
# Create a string containing the type and name of the instance node
str = indent + inst_node.prettyPrint("")
str = str + " {"
# Loop over all elements in the merged modification environment
for i in range(env.size()):
str = str + env.get(i).toString()
if i < env.size() - 1:
str = str + ", "
str = str + "}"
# Print
fid.write(str + "\n")
# Get all components and dump them recursively
components = inst_node.instComponentDeclList
for i in range(components.getNumChild()):
# Assume the primitive variables are leafs in the instance AST
if (inst_node.getClass() is \
org.jmodelica.modelica.compiler.InstPrimitive) is False:
dump_inst_ast(components.getChild(i), indent + " ", fid)
# Get all extends clauses and dump them recursively
extends = inst_node.instExtendsList
for i in range(extends.getNumChild()):
# Assume that primitive variables are leafs in the instance AST
if (inst_node.getClass() is \
org.jmodelica.modelica.compiler.InstPrimitive) is False:
dump_inst_ast(extends.getChild(i), indent + " ", fid)
# dump the filter instance
with open('out.txt', 'w') as fid:
dump_inst_ast(filter_instance, "", fid)
print("Done!")
| mit | Python |
|
55dd21610a2ed1befed6b4560528e8a6bf3602e2 | Define function to retrieve imgur credentials | ueg1990/imgur-cli | imgur_cli/cli.py | imgur_cli/cli.py | import argparse
import logging
import os
import imgurpython
from collections import namedtuple
logger = logging.getLogger(__name__)
def imgur_credentials():
    ImgurCredentials = namedtuple(
        'ImgurCredentials',
        ['client_id', 'client_secret', 'access_token', 'refresh_token',
         'mashape_key'])
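    # Prefer credentials from a local config module; fall back to
    # environment variables of the same names if the import fails.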
try:
from config import config
client_id = config.get('IMGUR_CLIENT_ID')
client_secret = config.get('IMGUR_CLIENT_SECRET')
access_token = config.get('IMGUR_ACCESS_TOKEN')
refresh_token = config.get('IMGUR_REFRESH_TOKEN')
mashape_key = config.get('IMGUR_MASHAPE_KEY')
except ImportError:
client_id = os.environ.get('IMGUR_CLIENT_ID')
client_secret = os.environ.get('IMGUR_CLIENT_SECRET')
access_token = os.environ.get('IMGUR_ACCESS_TOKEN')
refresh_token = os.environ.get('IMGUR_REFRESH_TOKEN')
mashape_key = os.environ.get('IMGUR_MASHAPE_KEY')
if not client_id or not client_secret:
        raise imgurpython.client.ImgurClientError(
            'Client credentials not found. Ensure you have both '
            'client id and client secret')
return ImgurCredentials(client_id, client_secret, access_token, refresh_token, mashape_key)
| mit | Python |
|
d3ebb800c88be18861608f8b174cc652223ac67c | Add utils.py with get_options function | klpdotorg/dubdubdub,klpdotorg/dubdubdub,klpdotorg/dubdubdub,klpdotorg/dubdubdub | apps/ivrs/utils.py | apps/ivrs/utils.py | def get_options(question_number):
if question_number == 2:
return " Press 4 or 5 "
else:
return " Press 1 for Yes or 2 for No"
| mit | Python |
|
2c8752cd586f6d02ce8da4bc3a79660889ed7f3f | Add some minimal testing for BandRCModel to the test suite. | cjcardinale/climlab,brian-rose/climlab,brian-rose/climlab,cjcardinale/climlab,cjcardinale/climlab | climlab/tests/test_bandrc.py | climlab/tests/test_bandrc.py | import numpy as np
import climlab
import pytest
# The fixtures are reusable pieces of code to set up the input to the tests.
# Without fixtures, we would have to do a lot of cutting and pasting
# I inferred which fixtures to use from the notebook
# Latitude-dependent grey radiation.ipynb
@pytest.fixture()
def model():
return climlab.BandRCModel()
# helper for a common test pattern
def _check_minmax(array, amin, amax):
return (np.allclose(array.min(), amin) and
np.allclose(array.max(), amax))
def test_model_creation(model):
"""Just make sure we can create a model."""
assert len(model.Tatm)==30
def test_integrate_years(model):
"""Check that we can integrate forward the model and get the expected
surface temperature and water vapor.
Also check the climate sensitivity to doubling CO2."""
model.step_forward()
model.integrate_years(2)
Ts = model.Ts.copy()
assert np.isclose(Ts, 275.43383753)
assert _check_minmax(model.q, 5.E-6, 3.23764447e-03)
model.absorber_vmr['CO2'] *= 2.
model.integrate_years(2)
assert np.isclose(model.Ts - Ts, 3.180993)
| mit | Python |
|
c1ea660b72ac10fd0a2dea1416b45c6796ca5adb | add pascal voc ingest | NervanaSystems/aeon,NervanaSystems/aeon,NervanaSystems/aeon,NervanaSystems/aeon | ingest/pascal.py | ingest/pascal.py | #!/usr/bin/python
import json
import glob
import sys
import getopt
import collections
import os
from os.path import isfile, join
import xml.etree.ElementTree as et
from collections import defaultdict
# http://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree
def etree_to_dict(t):
d = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(etree_to_dict, children):
for k, v in dc.iteritems():
dd[k].append(v)
d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
def validate_metadata(jobj,file):
boxlist = jobj['object']
if not isinstance(boxlist,collections.Sequence):
print('{0} is not a sequence').format(file)
return False
# print("{0} has {1} boxes").format(jobj['filename'],len(boxlist))
    index = 0
for box in boxlist:
if 'part' in box:
parts = box['part']
if not isinstance(parts,collections.Sequence):
print('parts {0} is not a sequence').format(file)
return False
index += 1
return True
def convert_pascal_to_json(input_path,output_path):
#onlyfiles = [f for f in listdir(input_path) if isfile(join(input_path, f)) && file.endswith('.xml')]
if not os.path.exists(output_path):
os.makedirs(output_path)
onlyfiles = glob.glob(join(input_path,'*.xml'))
onlyfiles.sort()
for file in onlyfiles:
outfile = join(output_path,os.path.basename(file))
outfile = os.path.splitext(outfile)[0]+'.json'
print(outfile)
trimmed = parse_single_file(join(input_path,file))
if validate_metadata(trimmed,file):
result = json.dumps(trimmed, sort_keys=True, indent=4, separators=(',', ': '))
f = open(outfile,'w')
f.write(result)
else:
print('error parsing metadata {0}').format(file)
#print(result)
def parse_single_file(path):
tree = et.parse(path)
root = tree.getroot()
d = etree_to_dict(root)
trimmed = d['annotation']
olist = trimmed['object']
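    # XML with a single <object> comes back from etree_to_dict as a dict,
    # not a list; normalize so downstream code can always iterate.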
if not isinstance(olist,collections.Sequence):
        trimmed['object'] = [olist]
return trimmed
def main(argv):
input_path = ''
output_path = ''
parse_file = ''
try:
opts, args = getopt.getopt(argv,"hi:o:p:")
except getopt.GetoptError:
print 'ingest.py -i <input> -o <output>'
sys.exit(2)
for opt, arg in opts:
print('opt {0}, arg {1}').format(opt,arg)
if opt == '-h':
print 'ingest.py -i <input> -o <output>'
sys.exit()
elif opt in ("-i", "--input"):
input_path = arg
elif opt in ("-o", "--output"):
output_path = arg
elif opt in ("-p", "--parse"):
parse_file = arg
print(parse_file)
if parse_file:
parsed = parse_single_file(parse_file)
json1 = json.dumps(parsed, sort_keys=True, indent=4, separators=(',', ': '))
print(json1)
elif input_path:
convert_pascal_to_json(input_path,output_path)
if __name__ == "__main__":
main(sys.argv[1:])
# file = '/usr/local/data/VOCdevkit/VOC2007/Annotations/006637.xml'
# tree = et.parse(file)
# root = tree.getroot()
# d = etree_to_dict(root)
# # et.dump(tree)
# json2 = d['annotation']
# json1 = json.dumps(json2, sort_keys=True, indent=4, separators=(',', ': '))
# print(json1)
# path = '/usr/local/data/VOCdevkit/VOC2007/Annotations/*.xml'
# convert_pascal_to_json(path)
| apache-2.0 | Python |
|
27899a91fc6cdf73dccc7f9c5c353b05d2433c42 | add example participant client inbound drop rule for blackholing | h2020-endeavour/endeavour,h2020-endeavour/endeavour | pclnt/blackholing_test.py | pclnt/blackholing_test.py | {
"inbound": [
{
"cookie": 3,
"match": {
"eth_src": "08:00:27:89:3b:9f"
},
"action": {
"drop": 0
}
}
]
} | apache-2.0 | Python |
|
cd910f95753a138e2df48a1370e666bee49ad1dd | Add py solution for 693. Binary Number with Alternating Bits | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/binary-number-with-alternating-bits.py | py/binary-number-with-alternating-bits.py | class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
power_2 = (n ^ (n >> 1)) + 1
return (power_2 & -power_2) == power_2
| apache-2.0 | Python |
|
b34c0ec439a997705799136e56a926649bd93e52 | add new function to test whether an object is completely within the bounds of an image | danforthcenter/plantcv,danforthcenter/plantcv,stiphyMT/plantcv,danforthcenter/plantcv,stiphyMT/plantcv,stiphyMT/plantcv | plantcv/plantcv/within_frame.py | plantcv/plantcv/within_frame.py | import cv2 as cv2
import numpy as np
def within_frame(img, obj):
'''
This function tests whether the plant object is completely in the field of view
Input:
img - an image with the bounds you are interested in
obj - a single object, preferably after calling pcv.image_composition(), that is from within `img`
Returns:
    in_bounds - True if the object is completely within the image frame,
                i.e. no contour point touches the image border
    :param img: numpy.ndarray
    :param obj: numpy.ndarray
:return in_bounds: boolean
'''
# Check if object is touching image boundaries (QC)
if len(np.shape(img)) == 3:
ix, iy, iz = np.shape(img)
else:
ix, iy = np.shape(img)
size1 = ix, iy
frame_background = np.zeros(size1, dtype=np.uint8)
frame = frame_background + 1
frame_contour, frame_hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
ptest = []
vobj = np.vstack(obj)
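    # pointPolygonTest returns +1 for a point inside the frame contour,
    # 0 on its border and -1 outside; the object is fully in frame only
    # if every contour point scores +1.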
for i, c in enumerate(vobj):
xy = tuple(c)
pptest = cv2.pointPolygonTest(frame_contour[0], xy, measureDist=False)
ptest.append(pptest)
in_bounds = all(c == 1 for c in ptest)
    return in_bounds
| mit | Python |
|
c4040803cb670f913bc8743ee68f5a5f0721d4f8 | Add game logic | HPI-Hackathon/cartets,HPI-Hackathon/cartets,HPI-Hackathon/cartets | backend/game.py | backend/game.py | # All game related code
import json
import random
class Game():
def __init__(self):
self.players = {}
self.turn = None
self.running = False
def add_player(self, conn, data):
player = Player(conn, data)
self.players[player.get_name()] = player
conn.send(json.dumps({'action': 'accepted', 'data': ''}))
return player
def wait_for_answer(self, player):
# Initial start of game
        if not self.running and len(self.players) == 3:
starter = self.start_game()
data = {'turn': starter.get_name(), 'cards': []}
return json.dumps({'action': 'start', 'data': data})
        return self.handle_round(player)
def handle_round(self, player):
pass
def start_game(self):
        self.running = True
        self.turn = random.choice(list(self.players.values()))
return self.turn
class Player():
def __init__(self, conn, data):
self.name = data['name']
self.connection = conn
self.cards = []
def get_name(self):
return self.name
class Card():
def __init__(self):
pass
| mit | Python |
|
69e22c778a576f746784270fa9971a6399433f92 | Add docstring to UnivariateFilter. | Myasuka/scikit-learn,bikong2/scikit-learn,billy-inn/scikit-learn,IndraVikas/scikit-learn,thilbern/scikit-learn,ogrisel/scikit-learn,jblackburne/scikit-learn,mwv/scikit-learn,moutai/scikit-learn,quheng/scikit-learn,tosolveit/scikit-learn,tomlof/scikit-learn,macks22/scikit-learn,krez13/scikit-learn,shangwuhencc/scikit-learn,raghavrv/scikit-learn,yanlend/scikit-learn,procoder317/scikit-learn,anurag313/scikit-learn,luo66/scikit-learn,Myasuka/scikit-learn,aewhatley/scikit-learn,dingocuster/scikit-learn,tdhopper/scikit-learn,florian-f/sklearn,joernhees/scikit-learn,shenzebang/scikit-learn,jakobworldpeace/scikit-learn,RPGOne/scikit-learn,xwolf12/scikit-learn,evgchz/scikit-learn,nesterione/scikit-learn,shyamalschandra/scikit-learn,Akshay0724/scikit-learn,idlead/scikit-learn,murali-munna/scikit-learn,vinayak-mehta/scikit-learn,vivekmishra1991/scikit-learn,yask123/scikit-learn,poryfly/scikit-learn,Fireblend/scikit-learn,michigraber/scikit-learn,ominux/scikit-learn,JosmanPS/scikit-learn,nesterione/scikit-learn,loli/semisupervisedforests,ElDeveloper/scikit-learn,victorbergelin/scikit-learn,heli522/scikit-learn,costypetrisor/scikit-learn,ChanChiChoi/scikit-learn,rajat1994/scikit-learn,mjudsp/Tsallis,jlegendary/scikit-learn,Jimmy-Morzaria/scikit-learn,mojoboss/scikit-learn,samzhang111/scikit-learn,hainm/scikit-learn,simon-pepin/scikit-learn,ishanic/scikit-learn,devanshdalal/scikit-learn,aflaxman/scikit-learn,huzq/scikit-learn,yunfeilu/scikit-learn,moutai/scikit-learn,rrohan/scikit-learn,henridwyer/scikit-learn,alexsavio/scikit-learn,lbishal/scikit-learn,Clyde-fare/scikit-learn,cauchycui/scikit-learn,xiaoxiamii/scikit-learn,mugizico/scikit-learn,RayMick/scikit-learn,bthirion/scikit-learn,murali-munna/scikit-learn,jpautom/scikit-learn,hitszxp/scikit-learn,bhargav/scikit-learn,moutai/scikit-learn,hsiaoyi0504/scikit-learn,pv/scikit-learn,hsuantien/scikit-learn,jlegendary/scikit-learn,zaxtax/scikit-learn,macks22/scikit-learn,abhishekkrthakur/scikit-learn,marcocaccin/scikit-learn,RPGOne/scikit-learn,pnedunuri/scikit-learn,jm-begon/scikit-learn,larsmans/scikit-learn,billy-inn/scikit-learn,MatthieuBizien/scikit-learn,massmutual/scikit-learn,cl4rke/scikit-learn,tosolveit/scikit-learn,LiaoPan/scikit-learn,zorojean/scikit-learn,shangwuhencc/scikit-learn,vshtanko/scikit-learn,rohanp/scikit-learn,zorroblue/scikit-learn,hlin117/scikit-learn,ogrisel/scikit-learn,pianomania/scikit-learn,Garrett-R/scikit-learn,spallavolu/scikit-learn,B3AU/waveTree,PatrickChrist/scikit-learn,mhdella/scikit-learn,fredhusser/scikit-learn,ngoix/OCRF,TomDLT/scikit-learn,ilo10/scikit-learn,arjoly/scikit-learn,thientu/scikit-learn,jorge2703/scikit-learn,amueller/scikit-learn,aabadie/scikit-learn,jkarnows/scikit-learn,kagayakidan/scikit-learn,xuewei4d/scikit-learn,aminert/scikit-learn,olologin/scikit-learn,liangz0707/scikit-learn,nelson-liu/scikit-learn,shenzebang/scikit-learn,victorbergelin/scikit-learn,beepee14/scikit-learn,shusenl/scikit-learn,ssaeger/scikit-learn,abimannans/scikit-learn,3manuek/scikit-learn,yask123/scikit-learn,pkruskal/scikit-learn,jmetzen/scikit-learn,poryfly/scikit-learn,cwu2011/scikit-learn,quheng/scikit-learn,r-mart/scikit-learn,cwu2011/scikit-learn,Fireblend/scikit-learn,nhejazi/scikit-learn,mlyundin/scikit-learn,cwu2011/scikit-learn,vybstat/scikit-learn,Garrett-R/scikit-learn,qifeigit/scikit-learn,mhue/scikit-learn,mjudsp/Tsallis,Obus/scikit-learn,IshankGulati/scikit-learn,fabianp/scikit-learn,anirudhjayaraman/scikit-learn,aetilley/scikit-
learn,CforED/Machine-Learning,vibhorag/scikit-learn,JPFrancoia/scikit-learn,vermouthmjl/scikit-learn,0x0all/scikit-learn,robin-lai/scikit-learn,mhdella/scikit-learn,jzt5132/scikit-learn,0asa/scikit-learn,scikit-learn/scikit-learn,hugobowne/scikit-learn,yonglehou/scikit-learn,arabenjamin/scikit-learn,arjoly/scikit-learn,zorojean/scikit-learn,Fireblend/scikit-learn,jjx02230808/project0223,henrykironde/scikit-learn,khkaminska/scikit-learn,sergeyf/scikit-learn,tosolveit/scikit-learn,CforED/Machine-Learning,DSLituiev/scikit-learn,ominux/scikit-learn,vortex-ape/scikit-learn,wanggang3333/scikit-learn,fyffyt/scikit-learn,schets/scikit-learn,rohanp/scikit-learn,espg/scikit-learn,shusenl/scikit-learn,saiwing-yeung/scikit-learn,AnasGhrab/scikit-learn,Nyker510/scikit-learn,AIML/scikit-learn,fzalkow/scikit-learn,DonBeo/scikit-learn,liangz0707/scikit-learn,shenzebang/scikit-learn,kmike/scikit-learn,xwolf12/scikit-learn,NelisVerhoef/scikit-learn,jpautom/scikit-learn,gclenaghan/scikit-learn,mjgrav2001/scikit-learn,mattgiguere/scikit-learn,loli/semisupervisedforests,yask123/scikit-learn,dsullivan7/scikit-learn,mxjl620/scikit-learn,Vimos/scikit-learn,jakirkham/scikit-learn,lenovor/scikit-learn,shenzebang/scikit-learn,r-mart/scikit-learn,xavierwu/scikit-learn,nrhine1/scikit-learn,ltiao/scikit-learn,clemkoa/scikit-learn,glemaitre/scikit-learn,florian-f/sklearn,fbagirov/scikit-learn,madjelan/scikit-learn,schets/scikit-learn,simon-pepin/scikit-learn,aewhatley/scikit-learn,kagayakidan/scikit-learn,marcocaccin/scikit-learn,HolgerPeters/scikit-learn,JeanKossaifi/scikit-learn,luo66/scikit-learn,ClimbsRocks/scikit-learn,evgchz/scikit-learn,UNR-AERIAL/scikit-learn,BiaDarkia/scikit-learn,ivannz/scikit-learn,clemkoa/scikit-learn,frank-tancf/scikit-learn,zihua/scikit-learn,davidgbe/scikit-learn,trungnt13/scikit-learn,gotomypc/scikit-learn,fabianp/scikit-learn,fredhusser/scikit-learn,466152112/scikit-learn,rahul-c1/scikit-learn,scikit-learn/scikit-learn,Srisai85/scikit-learn,MartinDelzant/scikit-learn,mxjl620/scikit-learn,jkarnows/scikit-learn,nelson-liu/scikit-learn,rajat1994/scikit-learn,sanketloke/scikit-learn,dingocuster/scikit-learn,NunoEdgarGub1/scikit-learn,chrisburr/scikit-learn,nvoron23/scikit-learn,themrmax/scikit-learn,michigraber/scikit-learn,andaag/scikit-learn,yonglehou/scikit-learn,kagayakidan/scikit-learn,Lawrence-Liu/scikit-learn,iismd17/scikit-learn,vermouthmjl/scikit-learn,466152112/scikit-learn,vortex-ape/scikit-learn,maheshakya/scikit-learn,btabibian/scikit-learn,tomlof/scikit-learn,icdishb/scikit-learn,jorik041/scikit-learn,joshloyal/scikit-learn,MechCoder/scikit-learn,wazeerzulfikar/scikit-learn,frank-tancf/scikit-learn,devanshdalal/scikit-learn,aflaxman/scikit-learn,cl4rke/scikit-learn,LiaoPan/scikit-learn,liberatorqjw/scikit-learn,manashmndl/scikit-learn,clemkoa/scikit-learn,deepesch/scikit-learn,terkkila/scikit-learn,etkirsch/scikit-learn,MatthieuBizien/scikit-learn,billy-inn/scikit-learn,abhishekkrthakur/scikit-learn,Adai0808/scikit-learn,moutai/scikit-learn,procoder317/scikit-learn,rahuldhote/scikit-learn,jm-begon/scikit-learn,xyguo/scikit-learn,ahoyosid/scikit-learn,ominux/scikit-learn,loli/sklearn-ensembletrees,fengzhyuan/scikit-learn,Srisai85/scikit-learn,waterponey/scikit-learn,devanshdalal/scikit-learn,jblackburne/scikit-learn,zhenv5/scikit-learn,toastedcornflakes/scikit-learn,JPFrancoia/scikit-learn,sumspr/scikit-learn,Aasmi/scikit-learn,herilalaina/scikit-learn,poryfly/scikit-learn,xavierwu/scikit-learn,PrashntS/scikit-learn,PatrickChrist/scikit-learn,henridwyer/scikit-learn,vigilv/sciki
t-learn,DonBeo/scikit-learn,bigdataelephants/scikit-learn,trungnt13/scikit-learn,russel1237/scikit-learn,IshankGulati/scikit-learn,liangz0707/scikit-learn,q1ang/scikit-learn,henrykironde/scikit-learn,Adai0808/scikit-learn,schets/scikit-learn,dhruv13J/scikit-learn,fyffyt/scikit-learn,nrhine1/scikit-learn,ningchi/scikit-learn,sinhrks/scikit-learn,hrjn/scikit-learn,glennq/scikit-learn,victorbergelin/scikit-learn,wlamond/scikit-learn,beepee14/scikit-learn,jereze/scikit-learn,abhishekkrthakur/scikit-learn,fredhusser/scikit-learn,samuel1208/scikit-learn,tawsifkhan/scikit-learn,spallavolu/scikit-learn,kylerbrown/scikit-learn,mhue/scikit-learn,samuel1208/scikit-learn,JsNoNo/scikit-learn,bikong2/scikit-learn,rvraghav93/scikit-learn,rahul-c1/scikit-learn,hitszxp/scikit-learn,aflaxman/scikit-learn,rajat1994/scikit-learn,harshaneelhg/scikit-learn,glouppe/scikit-learn,abimannans/scikit-learn,imaculate/scikit-learn,LohithBlaze/scikit-learn,DSLituiev/scikit-learn,fzalkow/scikit-learn,massmutual/scikit-learn,ZenDevelopmentSystems/scikit-learn,mrshu/scikit-learn,AnasGhrab/scikit-learn,Jimmy-Morzaria/scikit-learn,herilalaina/scikit-learn,mehdidc/scikit-learn,Djabbz/scikit-learn,pypot/scikit-learn,akionakamura/scikit-learn,kaichogami/scikit-learn,ky822/scikit-learn,xuewei4d/scikit-learn,lazywei/scikit-learn,chrsrds/scikit-learn,tosolveit/scikit-learn,joshloyal/scikit-learn,terkkila/scikit-learn,abhishekgahlot/scikit-learn,beepee14/scikit-learn,anirudhjayaraman/scikit-learn,0asa/scikit-learn,shahankhatch/scikit-learn,bthirion/scikit-learn,MartinSavc/scikit-learn,khkaminska/scikit-learn,AIML/scikit-learn,vigilv/scikit-learn,mayblue9/scikit-learn,sergeyf/scikit-learn,PrashntS/scikit-learn,untom/scikit-learn,sgenoud/scikit-learn,cdegroc/scikit-learn,bhargav/scikit-learn,thilbern/scikit-learn,rishikksh20/scikit-learn,hsiaoyi0504/scikit-learn,mattilyra/scikit-learn,abimannans/scikit-learn,rvraghav93/scikit-learn,nmayorov/scikit-learn,alexeyum/scikit-learn,bthirion/scikit-learn,cainiaocome/scikit-learn,glemaitre/scikit-learn,roxyboy/scikit-learn,arahuja/scikit-learn,h2educ/scikit-learn,sonnyhu/scikit-learn,jblackburne/scikit-learn,quheng/scikit-learn,gclenaghan/scikit-learn,mayblue9/scikit-learn,zorojean/scikit-learn,costypetrisor/scikit-learn,wazeerzulfikar/scikit-learn,RomainBrault/scikit-learn,ldirer/scikit-learn,MatthieuBizien/scikit-learn,samzhang111/scikit-learn,icdishb/scikit-learn,aetilley/scikit-learn,kylerbrown/scikit-learn,ilyes14/scikit-learn,treycausey/scikit-learn,huobaowangxi/scikit-learn,btabibian/scikit-learn,nhejazi/scikit-learn,mikebenfield/scikit-learn,yanlend/scikit-learn,lenovor/scikit-learn,tomlof/scikit-learn,B3AU/waveTree,spallavolu/scikit-learn,ivannz/scikit-learn,kylerbrown/scikit-learn,betatim/scikit-learn,sanketloke/scikit-learn,belltailjp/scikit-learn,ldirer/scikit-learn,untom/scikit-learn,mattgiguere/scikit-learn,B3AU/waveTree,nikitasingh981/scikit-learn,dsullivan7/scikit-learn,hsuantien/scikit-learn,IssamLaradji/scikit-learn,jereze/scikit-learn,phdowling/scikit-learn,rvraghav93/scikit-learn,manhhomienbienthuy/scikit-learn,abhishekgahlot/scikit-learn,kashif/scikit-learn,akionakamura/scikit-learn,eickenberg/scikit-learn,mfjb/scikit-learn,samuel1208/scikit-learn,0asa/scikit-learn,vigilv/scikit-learn,TomDLT/scikit-learn,3manuek/scikit-learn,jorik041/scikit-learn,RachitKansal/scikit-learn,cl4rke/scikit-learn,shangwuhencc/scikit-learn,fabioticconi/scikit-learn,ishanic/scikit-learn,0x0all/scikit-learn,walterreade/scikit-learn,saiwing-yeung/scikit-learn,mfjb/scikit-learn,Sentient07/scikit-lea
rn,wzbozon/scikit-learn,jmschrei/scikit-learn,macks22/scikit-learn,PatrickOReilly/scikit-learn,CforED/Machine-Learning,fengzhyuan/scikit-learn,theoryno3/scikit-learn,olologin/scikit-learn,jzt5132/scikit-learn,jaidevd/scikit-learn,lesteve/scikit-learn,altairpearl/scikit-learn,Clyde-fare/scikit-learn,MechCoder/scikit-learn,djgagne/scikit-learn,sonnyhu/scikit-learn,pythonvietnam/scikit-learn,jakobworldpeace/scikit-learn,toastedcornflakes/scikit-learn,cybernet14/scikit-learn,AlexRobson/scikit-learn,sonnyhu/scikit-learn,jmetzen/scikit-learn,larsmans/scikit-learn,costypetrisor/scikit-learn,liangz0707/scikit-learn,dsullivan7/scikit-learn,yask123/scikit-learn,btabibian/scikit-learn,ChanderG/scikit-learn,aabadie/scikit-learn,liberatorqjw/scikit-learn,abimannans/scikit-learn,bigdataelephants/scikit-learn,RachitKansal/scikit-learn,cainiaocome/scikit-learn,alvarofierroclavero/scikit-learn,ilo10/scikit-learn,Adai0808/scikit-learn,arahuja/scikit-learn,lazywei/scikit-learn,cybernet14/scikit-learn,zihua/scikit-learn,Aasmi/scikit-learn,r-mart/scikit-learn,loli/semisupervisedforests,treycausey/scikit-learn,elkingtonmcb/scikit-learn,IndraVikas/scikit-learn,vshtanko/scikit-learn,pompiduskus/scikit-learn,ogrisel/scikit-learn,hugobowne/scikit-learn,iismd17/scikit-learn,BiaDarkia/scikit-learn,rvraghav93/scikit-learn,kmike/scikit-learn,Lawrence-Liu/scikit-learn,zaxtax/scikit-learn,rrohan/scikit-learn,anurag313/scikit-learn,xiaoxiamii/scikit-learn,quheng/scikit-learn,Windy-Ground/scikit-learn,raghavrv/scikit-learn,betatim/scikit-learn,IndraVikas/scikit-learn,nomadcube/scikit-learn,rishikksh20/scikit-learn,Garrett-R/scikit-learn,JosmanPS/scikit-learn,andrewnc/scikit-learn,evgchz/scikit-learn,liyu1990/sklearn,vermouthmjl/scikit-learn,kevin-intel/scikit-learn,ChanChiChoi/scikit-learn,pv/scikit-learn,nhejazi/scikit-learn,RayMick/scikit-learn,rahuldhote/scikit-learn,sinhrks/scikit-learn,lenovor/scikit-learn,xyguo/scikit-learn,loli/semisupervisedforests,f3r/scikit-learn,zuku1985/scikit-learn,NunoEdgarGub1/scikit-learn,andrewnc/scikit-learn,ChanderG/scikit-learn,khkaminska/scikit-learn,yyjiang/scikit-learn,yanlend/scikit-learn,pkruskal/scikit-learn,sanketloke/scikit-learn,rsivapr/scikit-learn,AlexandreAbraham/scikit-learn,walterreade/scikit-learn,pythonvietnam/scikit-learn,fabianp/scikit-learn,pnedunuri/scikit-learn,trankmichael/scikit-learn,nikitasingh981/scikit-learn,eg-zhang/scikit-learn,wlamond/scikit-learn,jakirkham/scikit-learn,MartinDelzant/scikit-learn,wlamond/scikit-learn,mwv/scikit-learn,RomainBrault/scikit-learn,f3r/scikit-learn,JeanKossaifi/scikit-learn,maheshakya/scikit-learn,vortex-ape/scikit-learn,mugizico/scikit-learn,yunfeilu/scikit-learn,florian-f/sklearn,adamgreenhall/scikit-learn,NelisVerhoef/scikit-learn,toastedcornflakes/scikit-learn,potash/scikit-learn,Akshay0724/scikit-learn,huzq/scikit-learn,meduz/scikit-learn,wzbozon/scikit-learn,aewhatley/scikit-learn,etkirsch/scikit-learn,luo66/scikit-learn,adamgreenhall/scikit-learn,olologin/scikit-learn,untom/scikit-learn,mrshu/scikit-learn,murali-munna/scikit-learn,NelisVerhoef/scikit-learn,MohammedWasim/scikit-learn,sarahgrogan/scikit-learn,nomadcube/scikit-learn,shyamalschandra/scikit-learn,iismd17/scikit-learn,YinongLong/scikit-learn,eg-zhang/scikit-learn,glennq/scikit-learn,ningchi/scikit-learn,AnasGhrab/scikit-learn,phdowling/scikit-learn,joshloyal/scikit-learn,AlexanderFabisch/scikit-learn,mblondel/scikit-learn,maheshakya/scikit-learn,bnaul/scikit-learn,fengzhyuan/scikit-learn,Sentient07/scikit-learn,shikhardb/scikit-learn,ltiao/scikit-learn,q1ang/scik
it-learn,shikhardb/scikit-learn,pythonvietnam/scikit-learn,NunoEdgarGub1/scikit-learn,chrisburr/scikit-learn,bikong2/scikit-learn,djgagne/scikit-learn,depet/scikit-learn,murali-munna/scikit-learn,adamgreenhall/scikit-learn,alexsavio/scikit-learn,mlyundin/scikit-learn,luo66/scikit-learn,ycaihua/scikit-learn,MartinSavc/scikit-learn,nesterione/scikit-learn,robin-lai/scikit-learn,ldirer/scikit-learn,jorik041/scikit-learn,AlexRobson/scikit-learn,plissonf/scikit-learn,ishanic/scikit-learn,shahankhatch/scikit-learn,petosegan/scikit-learn,RomainBrault/scikit-learn,hdmetor/scikit-learn,vivekmishra1991/scikit-learn,raghavrv/scikit-learn,q1ang/scikit-learn,rsivapr/scikit-learn,JsNoNo/scikit-learn,mfjb/scikit-learn,potash/scikit-learn,procoder317/scikit-learn,0x0all/scikit-learn,sumspr/scikit-learn,zuku1985/scikit-learn,lazywei/scikit-learn,saiwing-yeung/scikit-learn,arabenjamin/scikit-learn,manashmndl/scikit-learn,hrjn/scikit-learn,pypot/scikit-learn,cdegroc/scikit-learn,yonglehou/scikit-learn,Myasuka/scikit-learn,anurag313/scikit-learn,henrykironde/scikit-learn,joshloyal/scikit-learn,hitszxp/scikit-learn,carrillo/scikit-learn,Garrett-R/scikit-learn,amueller/scikit-learn,fabioticconi/scikit-learn,lucidfrontier45/scikit-learn,dingocuster/scikit-learn,scikit-learn/scikit-learn,smartscheduling/scikit-learn-categorical-tree,ngoix/OCRF,altairpearl/scikit-learn,pkruskal/scikit-learn,saiwing-yeung/scikit-learn,maheshakya/scikit-learn,mxjl620/scikit-learn,alvarofierroclavero/scikit-learn,shikhardb/scikit-learn,kevin-intel/scikit-learn,RachitKansal/scikit-learn,cybernet14/scikit-learn,jakobworldpeace/scikit-learn,mojoboss/scikit-learn,YinongLong/scikit-learn,rrohan/scikit-learn,LohithBlaze/scikit-learn,B3AU/waveTree,robbymeals/scikit-learn,ycaihua/scikit-learn,AlexRobson/scikit-learn,mikebenfield/scikit-learn,LiaoPan/scikit-learn,ndingwall/scikit-learn,OshynSong/scikit-learn,voxlol/scikit-learn,RPGOne/scikit-learn,vshtanko/scikit-learn,kjung/scikit-learn,AlexandreAbraham/scikit-learn,giorgiop/scikit-learn,zhenv5/scikit-learn,mojoboss/scikit-learn,eickenberg/scikit-learn,arjoly/scikit-learn,heli522/scikit-learn,ngoix/OCRF,LohithBlaze/scikit-learn,hitszxp/scikit-learn,sumspr/scikit-learn,equialgo/scikit-learn,robbymeals/scikit-learn,manhhomienbienthuy/scikit-learn,hainm/scikit-learn,harshaneelhg/scikit-learn,herilalaina/scikit-learn,ycaihua/scikit-learn,krez13/scikit-learn,Clyde-fare/scikit-learn,zuku1985/scikit-learn,fyffyt/scikit-learn,bikong2/scikit-learn,liyu1990/sklearn,Achuth17/scikit-learn,robin-lai/scikit-learn,CVML/scikit-learn,kaichogami/scikit-learn,pythonvietnam/scikit-learn,joernhees/scikit-learn,nesterione/scikit-learn,madjelan/scikit-learn,jjx02230808/project0223,mrshu/scikit-learn,ningchi/scikit-learn,siutanwong/scikit-learn,macks22/scikit-learn,poryfly/scikit-learn,pompiduskus/scikit-learn,sinhrks/scikit-learn,xzh86/scikit-learn,meduz/scikit-learn,davidgbe/scikit-learn,ndingwall/scikit-learn,billy-inn/scikit-learn,jakobworldpeace/scikit-learn,cauchycui/scikit-learn,smartscheduling/scikit-learn-categorical-tree,plissonf/scikit-learn,vermouthmjl/scikit-learn,AlexandreAbraham/scikit-learn,ElDeveloper/scikit-learn,Windy-Ground/scikit-learn,djgagne/scikit-learn,ogrisel/scikit-learn,carrillo/scikit-learn,equialgo/scikit-learn,davidgbe/scikit-learn,mhue/scikit-learn,h2educ/scikit-learn,aminert/scikit-learn,arahuja/scikit-learn,Sentient07/scikit-learn,altairpearl/scikit-learn,lesteve/scikit-learn,arabenjamin/scikit-learn,tdhopper/scikit-learn,shyamalschandra/scikit-learn,mwv/scikit-learn,466152112/sciki
t-learn,liyu1990/sklearn,ndingwall/scikit-learn,nmayorov/scikit-learn,carrillo/scikit-learn,alvarofierroclavero/scikit-learn,Djabbz/scikit-learn,espg/scikit-learn,jorge2703/scikit-learn,dsquareindia/scikit-learn,lin-credible/scikit-learn,ishanic/scikit-learn,hugobowne/scikit-learn,appapantula/scikit-learn,michigraber/scikit-learn,lin-credible/scikit-learn,rishikksh20/scikit-learn,jayflo/scikit-learn,mlyundin/scikit-learn,mblondel/scikit-learn,ilyes14/scikit-learn,themrmax/scikit-learn,etkirsch/scikit-learn,themrmax/scikit-learn,pv/scikit-learn,jzt5132/scikit-learn,walterreade/scikit-learn,russel1237/scikit-learn,alexeyum/scikit-learn,Titan-C/scikit-learn,mayblue9/scikit-learn,petosegan/scikit-learn,ssaeger/scikit-learn,ClimbsRocks/scikit-learn,mattilyra/scikit-learn,phdowling/scikit-learn,NunoEdgarGub1/scikit-learn,jm-begon/scikit-learn,larsmans/scikit-learn,anurag313/scikit-learn,zuku1985/scikit-learn,jseabold/scikit-learn,depet/scikit-learn,UNR-AERIAL/scikit-learn,larsmans/scikit-learn,michigraber/scikit-learn,petosegan/scikit-learn,Titan-C/scikit-learn,rahuldhote/scikit-learn,florian-f/sklearn,mattilyra/scikit-learn,OshynSong/scikit-learn,theoryno3/scikit-learn,f3r/scikit-learn,mblondel/scikit-learn,terkkila/scikit-learn,mhdella/scikit-learn,fabioticconi/scikit-learn,imaculate/scikit-learn,mjgrav2001/scikit-learn,DonBeo/scikit-learn,frank-tancf/scikit-learn,jaidevd/scikit-learn,rexshihaoren/scikit-learn,yanlend/scikit-learn,xwolf12/scikit-learn,ephes/scikit-learn,bigdataelephants/scikit-learn,elkingtonmcb/scikit-learn,ky822/scikit-learn,deepesch/scikit-learn,MechCoder/scikit-learn,3manuek/scikit-learn,bnaul/scikit-learn,untom/scikit-learn,jseabold/scikit-learn,hdmetor/scikit-learn,ycaihua/scikit-learn,tomlof/scikit-learn,evgchz/scikit-learn,shusenl/scikit-learn,jayflo/scikit-learn,rsivapr/scikit-learn,Barmaley-exe/scikit-learn,vybstat/scikit-learn,JeanKossaifi/scikit-learn,pompiduskus/scikit-learn,kylerbrown/scikit-learn,nmayorov/scikit-learn,sanketloke/scikit-learn,cwu2011/scikit-learn,siutanwong/scikit-learn,themrmax/scikit-learn,krez13/scikit-learn,ZENGXH/scikit-learn,idlead/scikit-learn,treycausey/scikit-learn,xubenben/scikit-learn,RomainBrault/scikit-learn,siutanwong/scikit-learn,glouppe/scikit-learn,ilyes14/scikit-learn,pratapvardhan/scikit-learn,AlexandreAbraham/scikit-learn,sergeyf/scikit-learn,liyu1990/sklearn,fredhusser/scikit-learn,AlexanderFabisch/scikit-learn,fengzhyuan/scikit-learn,liberatorqjw/scikit-learn,mikebenfield/scikit-learn,tmhm/scikit-learn,dingocuster/scikit-learn,sarahgrogan/scikit-learn,glennq/scikit-learn,bhargav/scikit-learn,IssamLaradji/scikit-learn,maheshakya/scikit-learn,stylianos-kampakis/scikit-learn,chrsrds/scikit-learn,nomadcube/scikit-learn,henridwyer/scikit-learn,clemkoa/scikit-learn,mehdidc/scikit-learn,phdowling/scikit-learn,devanshdalal/scikit-learn,mugizico/scikit-learn,vinayak-mehta/scikit-learn,hitszxp/scikit-learn,B3AU/waveTree,jlegendary/scikit-learn,giorgiop/scikit-learn,gclenaghan/scikit-learn,kaichogami/scikit-learn,loli/sklearn-ensembletrees,PatrickChrist/scikit-learn,0asa/scikit-learn,jakirkham/scikit-learn,tdhopper/scikit-learn,mxjl620/scikit-learn,jayflo/scikit-learn,aminert/scikit-learn,fbagirov/scikit-learn,ChanderG/scikit-learn,yunfeilu/scikit-learn,vybstat/scikit-learn,ahoyosid/scikit-learn,idlead/scikit-learn,jzt5132/scikit-learn,jkarnows/scikit-learn,CforED/Machine-Learning,liberatorqjw/scikit-learn,Fireblend/scikit-learn,trungnt13/scikit-learn,0x0all/scikit-learn,jmschrei/scikit-learn,ankurankan/scikit-learn,carrillo/scikit-lear
n,ilo10/scikit-learn,rahul-c1/scikit-learn,Djabbz/scikit-learn,sonnyhu/scikit-learn,ashhher3/scikit-learn,hrjn/scikit-learn,xyguo/scikit-learn,andaag/scikit-learn,bnaul/scikit-learn,Vimos/scikit-learn,pompiduskus/scikit-learn,MartinSavc/scikit-learn,anirudhjayaraman/scikit-learn,kevin-intel/scikit-learn,sgenoud/scikit-learn,belltailjp/scikit-learn,yyjiang/scikit-learn,wlamond/scikit-learn,Barmaley-exe/scikit-learn,fzalkow/scikit-learn,shahankhatch/scikit-learn,altairpearl/scikit-learn,treycausey/scikit-learn,loli/sklearn-ensembletrees,gotomypc/scikit-learn,LohithBlaze/scikit-learn,jjx02230808/project0223,glemaitre/scikit-learn,deepesch/scikit-learn,chrisburr/scikit-learn,vortex-ape/scikit-learn,NelisVerhoef/scikit-learn,Srisai85/scikit-learn,ngoix/OCRF,Jimmy-Morzaria/scikit-learn,russel1237/scikit-learn,samuel1208/scikit-learn,JosmanPS/scikit-learn,LiaoPan/scikit-learn,PatrickOReilly/scikit-learn,glemaitre/scikit-learn,IndraVikas/scikit-learn,hlin117/scikit-learn,espg/scikit-learn,betatim/scikit-learn,ivannz/scikit-learn,cainiaocome/scikit-learn,justincassidy/scikit-learn,vivekmishra1991/scikit-learn,ClimbsRocks/scikit-learn,heli522/scikit-learn,andrewnc/scikit-learn,jaidevd/scikit-learn,simon-pepin/scikit-learn,mehdidc/scikit-learn,zihua/scikit-learn,krez13/scikit-learn,JosmanPS/scikit-learn,hlin117/scikit-learn,eickenberg/scikit-learn,mrshu/scikit-learn,mjudsp/Tsallis,bnaul/scikit-learn,HolgerPeters/scikit-learn,mojoboss/scikit-learn,r-mart/scikit-learn,mfjb/scikit-learn,samzhang111/scikit-learn,ChanderG/scikit-learn,jorge2703/scikit-learn,rajat1994/scikit-learn,mhue/scikit-learn,plissonf/scikit-learn,ashhher3/scikit-learn,fabianp/scikit-learn,idlead/scikit-learn,jmetzen/scikit-learn,lucidfrontier45/scikit-learn,hdmetor/scikit-learn,pianomania/scikit-learn,Aasmi/scikit-learn,joernhees/scikit-learn,mugizico/scikit-learn,eg-zhang/scikit-learn,xubenben/scikit-learn,ky822/scikit-learn,depet/scikit-learn,ZenDevelopmentSystems/scikit-learn,zorojean/scikit-learn,MechCoder/scikit-learn,trankmichael/scikit-learn,Djabbz/scikit-learn,waterponey/scikit-learn,chrsrds/scikit-learn,pnedunuri/scikit-learn,HolgerPeters/scikit-learn,JeanKossaifi/scikit-learn,ashhher3/scikit-learn,vivekmishra1991/scikit-learn,manhhomienbienthuy/scikit-learn,dsquareindia/scikit-learn,loli/sklearn-ensembletrees,alvarofierroclavero/scikit-learn,ElDeveloper/scikit-learn,nomadcube/scikit-learn,CVML/scikit-learn,ndingwall/scikit-learn,lbishal/scikit-learn,xiaoxiamii/scikit-learn,tawsifkhan/scikit-learn,giorgiop/scikit-learn,rexshihaoren/scikit-learn,vibhorag/scikit-learn,AIML/scikit-learn,zhenv5/scikit-learn,DSLituiev/scikit-learn,xyguo/scikit-learn,abhishekgahlot/scikit-learn,PatrickChrist/scikit-learn,Achuth17/scikit-learn,manhhomienbienthuy/scikit-learn,mjudsp/Tsallis,mayblue9/scikit-learn,ephes/scikit-learn,lucidfrontier45/scikit-learn,Obus/scikit-learn,ilyes14/scikit-learn,evgchz/scikit-learn,ephes/scikit-learn,wanggang3333/scikit-learn,scikit-learn/scikit-learn,nhejazi/scikit-learn,tdhopper/scikit-learn,xuewei4d/scikit-learn,kjung/scikit-learn,Clyde-fare/scikit-learn,plissonf/scikit-learn,vybstat/scikit-learn,TomDLT/scikit-learn,mhdella/scikit-learn,potash/scikit-learn,aabadie/scikit-learn,walterreade/scikit-learn,UNR-AERIAL/scikit-learn,JPFrancoia/scikit-learn,imaculate/scikit-learn,ltiao/scikit-learn,ZENGXH/scikit-learn,dsquareindia/scikit-learn,jmetzen/scikit-learn,raghavrv/scikit-learn,manashmndl/scikit-learn,jjx02230808/project0223,kjung/scikit-learn,aabadie/scikit-learn,arjoly/scikit-learn,imaculate/scikit-learn,proco
der317/scikit-learn,shangwuhencc/scikit-learn,heli522/scikit-learn,gotomypc/scikit-learn,rahuldhote/scikit-learn,siutanwong/scikit-learn,MohammedWasim/scikit-learn,glouppe/scikit-learn,smartscheduling/scikit-learn-categorical-tree,aminert/scikit-learn,rexshihaoren/scikit-learn,glennq/scikit-learn,voxlol/scikit-learn,AlexanderFabisch/scikit-learn,huobaowangxi/scikit-learn,kevin-intel/scikit-learn,thientu/scikit-learn,treycausey/scikit-learn,xubenben/scikit-learn,ZENGXH/scikit-learn,elkingtonmcb/scikit-learn,RayMick/scikit-learn,zorroblue/scikit-learn,tmhm/scikit-learn,pypot/scikit-learn,alexeyum/scikit-learn,theoryno3/scikit-learn,jpautom/scikit-learn,PatrickOReilly/scikit-learn,cauchycui/scikit-learn,TomDLT/scikit-learn,MatthieuBizien/scikit-learn,jereze/scikit-learn,3manuek/scikit-learn,lin-credible/scikit-learn,xzh86/scikit-learn,jorge2703/scikit-learn,MohammedWasim/scikit-learn,kashif/scikit-learn,jblackburne/scikit-learn,Obus/scikit-learn,Nyker510/scikit-learn,Akshay0724/scikit-learn,ningchi/scikit-learn,jseabold/scikit-learn,akionakamura/scikit-learn,dsullivan7/scikit-learn,AlexRobson/scikit-learn,spallavolu/scikit-learn,PatrickOReilly/scikit-learn,lenovor/scikit-learn,hsuantien/scikit-learn,ankurankan/scikit-learn,jkarnows/scikit-learn,djgagne/scikit-learn,xiaoxiamii/scikit-learn,potash/scikit-learn,ycaihua/scikit-learn,IssamLaradji/scikit-learn,justincassidy/scikit-learn,herilalaina/scikit-learn,YinongLong/scikit-learn,Sentient07/scikit-learn,Garrett-R/scikit-learn,olologin/scikit-learn,giorgiop/scikit-learn,fzalkow/scikit-learn,kmike/scikit-learn,voxlol/scikit-learn,wzbozon/scikit-learn,lucidfrontier45/scikit-learn,YinongLong/scikit-learn,zaxtax/scikit-learn,Jimmy-Morzaria/scikit-learn,huobaowangxi/scikit-learn,thientu/scikit-learn,ssaeger/scikit-learn,nvoron23/scikit-learn,bthirion/scikit-learn,mehdidc/scikit-learn,massmutual/scikit-learn,ElDeveloper/scikit-learn,arahuja/scikit-learn,f3r/scikit-learn,eickenberg/scikit-learn,robbymeals/scikit-learn,Myasuka/scikit-learn,mwv/scikit-learn,hainm/scikit-learn,sarahgrogan/scikit-learn,anntzer/scikit-learn,rohanp/scikit-learn,hsiaoyi0504/scikit-learn,voxlol/scikit-learn,Titan-C/scikit-learn,ivannz/scikit-learn,gclenaghan/scikit-learn,Obus/scikit-learn,CVML/scikit-learn,ChanChiChoi/scikit-learn,anirudhjayaraman/scikit-learn,ephes/scikit-learn,jaidevd/scikit-learn,AnasGhrab/scikit-learn,mattgiguere/scikit-learn,DonBeo/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jseabold/scikit-learn,appapantula/scikit-learn,terkkila/scikit-learn,sgenoud/scikit-learn,thilbern/scikit-learn,justincassidy/scikit-learn,lbishal/scikit-learn,tawsifkhan/scikit-learn,jlegendary/scikit-learn,Vimos/scikit-learn,bigdataelephants/scikit-learn,wazeerzulfikar/scikit-learn,ClimbsRocks/scikit-learn,jakirkham/scikit-learn,cdegroc/scikit-learn,mblondel/scikit-learn,mjgrav2001/scikit-learn,IshankGulati/scikit-learn,nikitasingh981/scikit-learn,victorbergelin/scikit-learn,ominux/scikit-learn,pv/scikit-learn,madjelan/scikit-learn,ashhher3/scikit-learn,pratapvardhan/scikit-learn,pypot/scikit-learn,zorroblue/scikit-learn,Barmaley-exe/scikit-learn,vinayak-mehta/scikit-learn,q1ang/scikit-learn,shusenl/scikit-learn,schets/scikit-learn,etkirsch/scikit-learn,mrshu/scikit-learn,harshaneelhg/scikit-learn,alexsavio/scikit-learn,JsNoNo/scikit-learn,stylianos-kampakis/scikit-learn,ankurankan/scikit-learn,UNR-AERIAL/scikit-learn,hrjn/scikit-learn,aewhatley/scikit-learn,cauchycui/scikit-learn,AlexanderFabisch/scikit-learn,amueller/scikit-learn,cl4rke/scikit-learn,kagayakidan/scik
it-learn,abhishekgahlot/scikit-learn,hdmetor/scikit-learn,nvoron23/scikit-learn,thilbern/scikit-learn,vinayak-mehta/scikit-learn,beepee14/scikit-learn,tmhm/scikit-learn,shikhardb/scikit-learn,hsiaoyi0504/scikit-learn,zihua/scikit-learn,thientu/scikit-learn,Windy-Ground/scikit-learn,kmike/scikit-learn,pianomania/scikit-learn,wanggang3333/scikit-learn,ChanChiChoi/scikit-learn,0x0all/scikit-learn,stylianos-kampakis/scikit-learn,jmschrei/scikit-learn,shyamalschandra/scikit-learn,dhruv13J/scikit-learn,theoryno3/scikit-learn,ahoyosid/scikit-learn,zhenv5/scikit-learn,RPGOne/scikit-learn,qifeigit/scikit-learn,DSLituiev/scikit-learn,henrykironde/scikit-learn,MartinDelzant/scikit-learn,petosegan/scikit-learn,rsivapr/scikit-learn,manashmndl/scikit-learn,xubenben/scikit-learn,xwolf12/scikit-learn,zorroblue/scikit-learn,khkaminska/scikit-learn,rohanp/scikit-learn,IssamLaradji/scikit-learn,h2educ/scikit-learn,BiaDarkia/scikit-learn,mattgiguere/scikit-learn,simon-pepin/scikit-learn,nvoron23/scikit-learn,OshynSong/scikit-learn,btabibian/scikit-learn,meduz/scikit-learn,alexsavio/scikit-learn,waterponey/scikit-learn,espg/scikit-learn,nrhine1/scikit-learn,betatim/scikit-learn,Aasmi/scikit-learn,frank-tancf/scikit-learn,huzq/scikit-learn,abhishekgahlot/scikit-learn,fbagirov/scikit-learn,Achuth17/scikit-learn,Vimos/scikit-learn,aetilley/scikit-learn,ltiao/scikit-learn,trungnt13/scikit-learn,iismd17/scikit-learn,justincassidy/scikit-learn,icdishb/scikit-learn,adamgreenhall/scikit-learn,nmayorov/scikit-learn,MartinSavc/scikit-learn,Nyker510/scikit-learn,cainiaocome/scikit-learn,BiaDarkia/scikit-learn,robbymeals/scikit-learn,jm-begon/scikit-learn,sumspr/scikit-learn,costypetrisor/scikit-learn,lucidfrontier45/scikit-learn,meduz/scikit-learn,anntzer/scikit-learn,eickenberg/scikit-learn,ssaeger/scikit-learn,ngoix/OCRF,anntzer/scikit-learn,cybernet14/scikit-learn,hsuantien/scikit-learn,kashif/scikit-learn,jpautom/scikit-learn,kashif/scikit-learn,florian-f/sklearn,vshtanko/scikit-learn,Achuth17/scikit-learn,sinhrks/scikit-learn,mattilyra/scikit-learn,rrohan/scikit-learn,HolgerPeters/scikit-learn,Adai0808/scikit-learn,PrashntS/scikit-learn,Lawrence-Liu/scikit-learn,hlin117/scikit-learn,andaag/scikit-learn,JsNoNo/scikit-learn,jayflo/scikit-learn,marcocaccin/scikit-learn,huzq/scikit-learn,trankmichael/scikit-learn,xavierwu/scikit-learn,aetilley/scikit-learn,jorik041/scikit-learn,mikebenfield/scikit-learn,RachitKansal/scikit-learn,glouppe/scikit-learn,vibhorag/scikit-learn,appapantula/scikit-learn,rexshihaoren/scikit-learn,xavierwu/scikit-learn,trankmichael/scikit-learn,PrashntS/scikit-learn,ZenDevelopmentSystems/scikit-learn,wzbozon/scikit-learn,andaag/scikit-learn,sgenoud/scikit-learn,equialgo/scikit-learn,Nyker510/scikit-learn,robin-lai/scikit-learn,bhargav/scikit-learn,icdishb/scikit-learn,russel1237/scikit-learn,alexeyum/scikit-learn,ZENGXH/scikit-learn,rsivapr/scikit-learn,nrhine1/scikit-learn,madjelan/scikit-learn,equialgo/scikit-learn,lesteve/scikit-learn,MohammedWasim/scikit-learn,fabioticconi/scikit-learn,pratapvardhan/scikit-learn,amueller/scikit-learn,appapantula/scikit-learn,AIML/scikit-learn,pratapvardhan/scikit-learn,CVML/scikit-learn,vibhorag/scikit-learn,sgenoud/scikit-learn,abhishekkrthakur/scikit-learn,kaichogami/scikit-learn,tawsifkhan/scikit-learn,sarahgrogan/scikit-learn,pianomania/scikit-learn,xuewei4d/scikit-learn,xzh86/scikit-learn,depet/scikit-learn,kmike/scikit-learn,jmschrei/scikit-learn,nikitasingh981/scikit-learn,hugobowne/scikit-learn,deepesch/scikit-learn,roxyboy/scikit-learn,waterponey/scik
it-learn,ngoix/OCRF,ankurankan/scikit-learn,pnedunuri/scikit-learn,JPFrancoia/scikit-learn,0asa/scikit-learn,h2educ/scikit-learn,marcocaccin/scikit-learn,lbishal/scikit-learn,MartinDelzant/scikit-learn,roxyboy/scikit-learn,Windy-Ground/scikit-learn,belltailjp/scikit-learn,nelson-liu/scikit-learn,lin-credible/scikit-learn,RayMick/scikit-learn,ldirer/scikit-learn,OshynSong/scikit-learn,vigilv/scikit-learn,jereze/scikit-learn,ankurankan/scikit-learn,huobaowangxi/scikit-learn,stylianos-kampakis/scikit-learn,IshankGulati/scikit-learn,Akshay0724/scikit-learn,mjudsp/Tsallis,eg-zhang/scikit-learn,joernhees/scikit-learn,massmutual/scikit-learn,henridwyer/scikit-learn,loli/sklearn-ensembletrees,nelson-liu/scikit-learn,dhruv13J/scikit-learn,ahoyosid/scikit-learn,wazeerzulfikar/scikit-learn,gotomypc/scikit-learn,shahankhatch/scikit-learn,lesteve/scikit-learn,samzhang111/scikit-learn,belltailjp/scikit-learn,toastedcornflakes/scikit-learn,elkingtonmcb/scikit-learn,zaxtax/scikit-learn,hainm/scikit-learn,depet/scikit-learn,mattilyra/scikit-learn,kjung/scikit-learn,mlyundin/scikit-learn,mjgrav2001/scikit-learn,rishikksh20/scikit-learn,Barmaley-exe/scikit-learn,Lawrence-Liu/scikit-learn,sergeyf/scikit-learn,ky822/scikit-learn,tmhm/scikit-learn,andrewnc/scikit-learn,ilo10/scikit-learn,harshaneelhg/scikit-learn,yyjiang/scikit-learn,xzh86/scikit-learn,larsmans/scikit-learn,chrsrds/scikit-learn,arabenjamin/scikit-learn,anntzer/scikit-learn,fyffyt/scikit-learn,qifeigit/scikit-learn,fbagirov/scikit-learn,chrisburr/scikit-learn,cdegroc/scikit-learn,aflaxman/scikit-learn,ZenDevelopmentSystems/scikit-learn,yonglehou/scikit-learn,lazywei/scikit-learn,dhruv13J/scikit-learn,Titan-C/scikit-learn,wanggang3333/scikit-learn,rahul-c1/scikit-learn,yyjiang/scikit-learn,Srisai85/scikit-learn,davidgbe/scikit-learn,dsquareindia/scikit-learn,pkruskal/scikit-learn,roxyboy/scikit-learn,qifeigit/scikit-learn,akionakamura/scikit-learn,yunfeilu/scikit-learn,466152112/scikit-learn | examples/plot_feature_selection.py | examples/plot_feature_selection.py | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM attributes small weights to these features, but these
weight are non zero. Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn import datasets, svm
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.normal(size=(len(iris.data), 35))
# Add the noisy data to the informative features
x = np.hstack((iris.data, E))
y = iris.target
################################################################################
pl.figure(1)
pl.clf()
x_indices = np.arange(x.shape[-1])
################################################################################
# Univariate feature selection
from scikits.learn.feature_selection import univariate_selection as univ_selection
# As a scoring function, we use a F test for classification
# We keep the features whose p-values fall below the default
# false-positive-rate threshold
selector = univ_selection.SelectFpr(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector._pvalues)
scores /= scores.max()
pl.bar(x_indices-.45, scores, width=.3,
label=r'Univariate score ($-\log(p\,values)$)',
color='g')
################################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(x, y)
svm_weights = (clf.support_**2).sum(axis=0)
svm_weights /= svm_weights.max()
pl.bar(x_indices-.15, svm_weights, width=.3, label='SVM weight',
color='r')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.axis('tight')
pl.legend()
pl.show()
| """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM attributes small weights to these features, but these
weight are non zero. Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn import datasets, svm
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.normal(size=(len(iris.data), 35))
# Add the noisy data to the informative features
x = np.hstack((iris.data, E))
y = iris.target
################################################################################
pl.figure(1)
pl.clf()
x_indices = np.arange(x.shape[-1])
################################################################################
# Univariate feature selection
from scikits.learn.feature_selection import univ_selection
# As a scoring function, we use a F test for classification
# We use the default selection function: the 10% most significant
# features
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
scores /= scores.max()
pl.bar(x_indices-.45, scores, width=.3,
label=r'Univariate score ($-\log(p\,values)$)',
color='g')
################################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(x, y)
svm_weights = (clf.support_**2).sum(axis=0)
svm_weights /= svm_weights.max()
pl.bar(x_indices-.15, svm_weights, width=.3, label='SVM weight',
color='r')
################################################################################
# Now fit an SVM with added feature selection
selector = univ_selection.UnivSelection(
estimator=clf,
score_func=univ_selection.f_classif)
selector.fit(x, y)
svm_weights = (clf.support_**2).sum(axis=0)
svm_weights /= svm_weights.max()
full_svm_weights = np.zeros(selector.support_.shape)
full_svm_weights[selector.support_] = svm_weights
pl.bar(x_indices+.15, full_svm_weights, width=.3,
label='SVM weight after univariate selection',
color='b')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.axis('tight')
pl.legend()
pl.show()
| bsd-3-clause | Python |
1beec05941a6a34452bea6e9f60a1673c0f0925f | add base test case file | isotoma/KeenClient-Python,keenlabs/KeenClient-Python,ruleant/KeenClient-Python | keen/tests/base_test_case.py | keen/tests/base_test_case.py | __author__ = 'dkador'
| mit | Python |
|
1fa849f1a0eadad9573b677d3904986d76f900eb | Create main.py | mindm/2017Challenges,erocs/2017Challenges,popcornanachronism/2017Challenges,mindm/2017Challenges,erocs/2017Challenges,DakRomo/2017Challenges,popcornanachronism/2017Challenges,erocs/2017Challenges,popcornanachronism/2017Challenges,DakRomo/2017Challenges,popcornanachronism/2017Challenges,popcornanachronism/2017Challenges,DakRomo/2017Challenges,popcornanachronism/2017Challenges,DakRomo/2017Challenges,mindm/2017Challenges,DakRomo/2017Challenges,erocs/2017Challenges,DakRomo/2017Challenges,mindm/2017Challenges,DakRomo/2017Challenges,DakRomo/2017Challenges,erocs/2017Challenges,mindm/2017Challenges,popcornanachronism/2017Challenges,DakRomo/2017Challenges,erocs/2017Challenges,erocs/2017Challenges,mindm/2017Challenges,mindm/2017Challenges,erocs/2017Challenges,popcornanachronism/2017Challenges,erocs/2017Challenges,mindm/2017Challenges,DakRomo/2017Challenges,mindm/2017Challenges,mindm/2017Challenges,DakRomo/2017Challenges,mindm/2017Challenges,erocs/2017Challenges,popcornanachronism/2017Challenges,erocs/2017Challenges,erocs/2017Challenges,DakRomo/2017Challenges,popcornanachronism/2017Challenges,mindm/2017Challenges,popcornanachronism/2017Challenges,mindm/2017Challenges,mindm/2017Challenges,popcornanachronism/2017Challenges,popcornanachronism/2017Challenges,DakRomo/2017Challenges,erocs/2017Challenges,DakRomo/2017Challenges,erocs/2017Challenges,popcornanachronism/2017Challenges | challenge_2/python/wost/main.py | challenge_2/python/wost/main.py | """
Python 3.6:
:: Counts all the instances of all the elements in a list.
:: Returns all the instances with a count of 1.
"""
def find_one_in_list(a_list):
a_dict = {}
for char in a_list:
if char not in a_dict.keys():
a_dict[char] = 1
else:
a_dict[char] += 1
for letter in a_dict.keys():
if a_dict[letter] == 1:
print(letter, end=" ")
print()
def main():
# Returns 6, 7.
find_one_in_list([5, 4, 3, 4, 5, 6, 1, 3, 1, 7, 8, 8])
# Returns b.
find_one_in_list(["a", "b", "c", "a", "c", "W", "W"])
# Returns A, 5, r.
find_one_in_list(["A", "b", "d", "r", 4, 5, 4, "b", "d"])
# Returns nothing.
find_one_in_list([])
if __name__ == "__main__":
main()
| mit | Python |
|
ac4679b4dcbbc3b2a29230233afc138f98cf2c42 | Add the basics | wurstmineberg/python-anvil | anvil.py | anvil.py | import gzip
import io
import nbt.nbt
import pathlib
import re
import zlib
class Region:
def __init__(self, path):
if isinstance(path, str):
path = pathlib.Path(path)
with path.open('rb') as f:
data = f.read()
self.locations = data[:4096]
self.timestamps = data[4096:8192]
self.data = data[8192:]
match = re.search('r\.(-?[0-9]+)\.(-?[0-9]+)\.mca$', path.name)
if match:
self.x = int(match.group(1))
self.z = int(match.group(2))
else:
self.x = None
self.z = None
def chunk_column(self, x, z):
x_offset = x & 31
z_offset = z & 31
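        # Each chunk has a 4-byte entry in the 4096-byte header table:
        # a 3-byte big-endian sector offset plus a 1-byte sector count.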
        meta_offset = 4 * (x_offset + z_offset * 32)
chunk_location = self.locations[meta_offset:meta_offset + 4]
offset = chunk_location[0] * (256 ** 2) + chunk_location[1] * 256 + chunk_location[2]
if offset == 0:
return ChunkColumn(None, x=x, z=z)
else:
offset -= 2
sector_count = chunk_location[3]
return ChunkColumn(self.data[4096 * offset:4096 * (offset + sector_count)], x=x, z=z)
class ChunkColumn:
def __init__(self, data, *, x=None, z=None):
self.x = x
self.z = z
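        # Chunk payload header: a 4-byte big-endian length followed by a
        # 1-byte compression type (1 = gzip, 2 = zlib).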
length = data[0] * (256 ** 3) + data[1] * (256 ** 2) + data[2] * 256 + data[3]
compression = data[4]
compressed_data = data[5:4 + length]
if compression == 1: # gzip
decompress = gzip.decompress
elif compression == 2: # zlib
decompress = zlib.decompress
else:
raise ValueError('Unknown compression method: {}'.format(compression))
self.data = nbt.nbt.NBTFile(buffer=io.BytesIO(decompress(compressed_data)))
| mit | Python |
|
702abe6dc661fbcda04f743edc56d2938098cefa | Add checkJSON file function only for checking a JSON file against a specified schema | jimwaldo/HarvardX-Tools,jimwaldo/HarvardX-Tools | src/main/python/convertfiles/checkJSON.py | src/main/python/convertfiles/checkJSON.py | #!/nfs/projects/c/ci3_jwaldo/MONGO/bin/python
"""
This function will check an existing JSON newline delimited file
against a specified schema
Input is a newline delimited JSON file and schema file
Output is a summary printout of statistics
Usage:
python checkJSON [-options]
OPTIONS:
--input Name of input filename (required)
--schema Specify JSON Schema (required)
--schema-name Specify JSON Schema name within json file, if it exists
@author: G.Lopez
"""
import convertCSVtoJSON as converter
from path import path
import json
from collections import OrderedDict
import argparse
import sys
# Maintain Stats
LINE_CNT = 0
LINE_CNT_1000 = 1000
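# Progress is reported to stdout once every LINE_CNT_1000 input lines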
def checkJSON(inputFile, schemaFile, schemaName=None):
global LINE_CNT
# Read specified schema file
checkFormat = converter.convertCSVtoJSON()
schema_dict = checkFormat.readSchema( path(schemaFile), schemaName )
# Read JSON file
fin = open(inputFile, 'r')
for line in fin:
try:
json_rec = json.loads(line, object_pairs_hook=OrderedDict)
checkFormat.cleanJSONline(json_rec, schema_dict, applySchema=False)
checkFormat.checkIllegalKeys(json_rec, fixkeys=False)
# Print procesing Counter
LINE_CNT = LINE_CNT + 1
if LINE_CNT % LINE_CNT_1000 == 0:
sys.stdout.write("[main]: %dk Lines processed\r" % ( LINE_CNT / LINE_CNT_1000 ) )
sys.stdout.flush()
        except Exception:
            print "[checkJSON]: Error parsing JSON line at line %s" % LINE_CNT
checkFormat.printOtherStats()
checkFormat.calculateSchemaStats()
checkFormat.printSchemaStats()
checkFormat.calculateOverallSummary()
checkFormat.printOverallSummary()
def main():
"""
Main Program to Check Specified JSON file against Schema
"""
# Setup Command Line Options
text_help = '''usage: %prog [-options] '''
text_description = ''' Check JSON schema script '''
parser = argparse.ArgumentParser( prog='PROG',
description=text_description)
parser.add_argument("--input", type=str, help="Name of input file", required=True)
parser.add_argument("--schema", type=str, help="Specify JSON Schema", required=True)
parser.add_argument("--schema-name", type=str, help="Specify JSON Schema Name")
args = vars(parser.parse_args())
print "[main]: arguments passed => %s" % args
# Read Input File
print "[main]: Reading JSON input file %s " % args['input']
checkJSON( args['input'], args['schema'], args['schema_name'] )
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
|
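Example invocation of the script above; the file names are placeholders, not files shipped with the repository:

python checkJSON.py --input events.json --schema event_schema.json --schema-name tracking_log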
7330f9f1423fe7ee169569957d537441b6d72c08 | Create 0106_us_city_synonyms.py | boisvert42/npr-puzzle-python | 2019/0106_us_city_synonyms.py | 2019/0106_us_city_synonyms.py | #%%
"""
NPR 2019-01-06
https://www.npr.org/2019/01/06/682575357/sunday-puzzle-stuck-in-the-middle
Name a major U.S. city in 10 letters. If you have the right one, you can rearrange its letters to get two 5-letter words that are synonyms. What are they?
"""
import sys
sys.path.append('..')
import nprcommontools as nct
from nltk.corpus import gazetteers
#%%
COMMON_WORDS = frozenset(x for x in nct.get_common_words() if len(x) == 5)
#%%
US_CITIES = set(nct.alpha_only(x.lower()) for x in gazetteers.words('uscities.txt') if len(nct.alpha_only(x)) == 10)
city_dict = nct.make_sorted_dict(US_CITIES)
#%%
for c1 in COMMON_WORDS:
my_synonyms = nct.get_synonyms(c1)
for c2 in my_synonyms:
        sort_word = nct.sort_string(c1 + c2)
if sort_word in city_dict:
print(c1,c2,city_dict[sort_word])
| cc0-1.0 | Python |
|
2f08053dc04470c9a1e4802e0e90c198bb5eae63 | Update app/views/account/__init__.py | apipanda/openssl,apipanda/openssl,apipanda/openssl,apipanda/openssl | app/views/account/__init__.py | app/views/account/__init__.py | from flask import Blueprint
account = Blueprint(
'account',
__name__
)
from . import views
| mit | Python |
|
5470661c6f171f1e9da609c3bf67ece21cf6d6eb | Add example for response status code | timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug | examples/return_400.py | examples/return_400.py | import hug
from falcon import HTTP_400
@hug.get()
def only_positive(positive: int, response):
if positive < 0:
response.status = HTTP_400 | mit | Python |
|
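A sketch of exercising the handler above through hug's built-in test client; the module name return_400 and the exact status strings are assumptions based on the file name and falcon's conventions:

import hug
import return_400  # the module defined above

assert hug.test.get(return_400, 'only_positive', positive=5).status == '200 OK'
assert hug.test.get(return_400, 'only_positive', positive=-1).status == '400 Bad Request'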
450f55f158bdec4b290851d68b8b79bd824d50f6 | Add the joystick test | Pitchless/arceye,Pitchless/arceye | bin/joy_test.py | bin/joy_test.py | #!/usr/bin/env python
from __future__ import print_function
import pygame
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputing the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def print(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# -------- Main Program Loop -----------
while not done:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN:
print("Joystick button pressed.")
if event.type == pygame.JOYBUTTONUP:
print("Joystick button released.")
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
textPrint.reset()
# Get count of joysticks
joystick_count = pygame.joystick.get_count()
textPrint.print(screen, "Number of joysticks: {}".format(joystick_count) )
textPrint.indent()
# For each joystick:
for i in range(joystick_count):
joystick = pygame.joystick.Joystick(i)
joystick.init()
textPrint.print(screen, "Joystick {}".format(i) )
textPrint.indent()
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
textPrint.print(screen, "Joystick name: {}".format(name) )
# Usually axis run in pairs, up/down for one, and left/right for
# the other.
axes = joystick.get_numaxes()
textPrint.print(screen, "Number of axes: {}".format(axes) )
textPrint.indent()
for i in range( axes ):
axis = joystick.get_axis( i )
textPrint.print(screen, "Axis {} value: {:>6.3f}".format(i, axis) )
textPrint.unindent()
buttons = joystick.get_numbuttons()
textPrint.print(screen, "Number of buttons: {}".format(buttons) )
textPrint.indent()
for i in range( buttons ):
button = joystick.get_button( i )
textPrint.print(screen, "Button {:>2} value: {}".format(i,button) )
textPrint.unindent()
# Hat switch. All or nothing for direction, not like joysticks.
# Value comes back in an array.
hats = joystick.get_numhats()
textPrint.print(screen, "Number of hats: {}".format(hats) )
textPrint.indent()
for i in range( hats ):
hat = joystick.get_hat( i )
textPrint.print(screen, "Hat {} value: {}".format(i, str(hat)) )
textPrint.unindent()
textPrint.unindent()
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
| apache-2.0 | Python |
|
34f44cd57baf9f0a548d728e90ca0c67f47b08a1 | Add tests for Resource | soccermetrics/soccermetrics-client-py | tests/test_resource.py | tests/test_resource.py | import unittest
import soccermetrics
from soccermetrics import __api_version__
from soccermetrics.rest import SoccermetricsRestClient
from soccermetrics.rest.resource import Resource
class ResourceTest(unittest.TestCase):
def setUp(self):
base_url = "http://api-summary.soccermetrics.net"
auth = dict(account="APP_ID",api_key="APP_KEY")
self.resource = Resource(base_url, auth)
def test_initialization(self):
self.assertEqual(self.resource.auth['account'],"APP_ID")
self.assertEqual(self.resource.auth['api_key'],"APP_KEY")
self.assertEqual(self.resource.endpoint,'/%s' % __api_version__) | mit | Python |
|
0b0d77ca77cf5359175836d68fc0bcce3829d731 | Create change_config.py | GluuFederation/community-edition-setup,GluuFederation/community-edition-setup,GluuFederation/community-edition-setup | static/scripts/change_hostname/change_config.py | static/scripts/change_hostname/change_config.py | import os, sys
from change_gluu_host import Installer, FakeRemote, ChangeGluuHostname
name_changer = ChangeGluuHostname(
old_host='<current_hostname>',
new_host='<new_hostname>',
cert_city='<city>',
cert_mail='<email>',
cert_state='<state_or_region>',
cert_country='<country>',
server='<actual_hostname_of_server>',
ip_address='<ip_address_of_server>',
ldap_password="<ldap_password>",
os_type='<linux_distro>',
    local=True
)
r = name_changer.startup()
if not r:
sys.exit(1)
name_changer.change_appliance_config()
name_changer.change_clients()
name_changer.change_uma()
name_changer.change_httpd_conf()
name_changer.create_new_certs()
name_changer.change_host_name()
name_changer.modify_etc_hosts()
| mit | Python |
|
3cb39bc8be7fdf857ebbdd2f78cbb617b2dda104 | Create PowofTwo_003.py | Chasego/codi,Chasego/codi,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/codirit,cc13ny/algo,Chasego/cod,cc13ny/Allin,Chasego/cod,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,Chasego/codirit,Chasego/cod,cc13ny/Allin,cc13ny/algo | leetcode/231-Power-of-Two/PowofTwo_003.py | leetcode/231-Power-of-Two/PowofTwo_003.py | class Solution:
# @param {integer} n
# @return {boolean}
def isPowerOfTwo(self, n):
        return n > 0 and (n & (n - 1)) == 0
| mit | Python |
|
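The n & (n - 1) expression clears the lowest set bit, so it is zero exactly when n has a single bit set, i.e. when n is a power of two. A quick sanity check of the solution above:

s = Solution()
assert s.isPowerOfTwo(1) and s.isPowerOfTwo(64)
assert not s.isPowerOfTwo(0) and not s.isPowerOfTwo(6)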
3cc6edabfc0251516aa2b11a6838fe12a794967c | Duplicate sandwich | SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive | Codewars/DuplicateSandwich.py | Codewars/DuplicateSandwich.py | def duplicate_sandwich(arr):
seen = set()
for word in arr:
if word in seen:
double = word
break
seen.add(word)
i1 = -1
i2 = -1
for i,word in enumerate(arr):
if word == double:
if i1 < 0:
i1 = i
else:
i2 = i
break
return arr[i1+1:i2]
| unlicense | Python |
|
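An equivalent formulation of the slicing above, sketched with list.index so the second scan is unnecessary; behavior matches the kata's guarantee that exactly one value repeats:

def duplicate_sandwich(arr):
    seen = set()
    for word in arr:
        if word in seen:
            first = arr.index(word)
            second = arr.index(word, first + 1)
            return arr[first + 1:second]
        seen.add(word)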
edd28dc68b91af78da1a1d576fcb9dcb83ebd0c8 | Create lin_reg.py | RationalAsh/ml_scripts | lin_reg.py | lin_reg.py | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import square
#Mean Square error function
def costf(X, y, theta):
m = y.shape[0]
#print m
return (1.0/m)*np.sum(np.power(np.dot(X,theta) - y, 2))
#Gradient of error function
def gradientf(X, y, theta):
m = y.shape[0]
err = np.dot(X, theta) - y
return (2.0/m)*np.dot(np.transpose(X), err)
t = np.arange(0,10,0.01)
y = 2*square(t) + 0*np.random.random(t.shape)
X = np.array([[1, np.sin(x), np.sin(3*x), np.sin(5*x), np.sin(7*x)] for x in t])
th = np.zeros(5)
errors = []
thetas = []
#Optimizing using gradient descent algorithm
numiters = 1000
alpha = 0.02 #Learning rate
errors.append(costf(X,y,th))
for i in xrange(numiters):
#Gradient descent
grad = gradientf(X,y,th)
th = th - alpha*grad
errors.append(costf(X,y,th))
thetas.append(th)
if(i%10 == 0):
print "Iteration: "+str(i)
print "Costf: "+ str(costf(X,y,th))
print "Gradient: " + str(gradientf(X, t, th))
print "Theta: "+ str(th)
y_ = np.dot(X, th)
#Closed form solution
th_opt = np.dot(np.linalg.pinv(X), y)
y_opt = np.dot(X, th_opt)
#Plotting results
plt.plot(t, y, 'o')
plt.xlabel('x')
plt.ylabel('y')
plt.hold(True)
plt.plot(t, y_)
plt.plot(t, y_opt)
plt.figure()
plt.plot(errors)
plt.title("Error over time")
plt.ylabel("Error")
plt.xlabel("Number of iterations")
plt.show()
| mit | Python |
|
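The closed-form step above uses the Moore-Penrose pseudoinverse; for this design matrix, whose five columns (constant plus distinct sine harmonics) are linearly independent, it agrees with the normal-equations solution. A sketch, run in the same session as the script above:

th_normal = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
# th_normal matches th_opt = np.dot(np.linalg.pinv(X), y) up to
# numerical precision whenever X has full column rank.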
dc854dc41929b027f393c7e341be51193b4ca7b9 | Create SearchinRSArr_001.py | Chasego/cod,cc13ny/algo,Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/codirit,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/cod,Chasego/codirit,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/codi,cc13ny/Allin,Chasego/cod,Chasego/cod,Chasego/codi,Chasego/codi | leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py | leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py | class Solution:
# @param {integer[]} nums
# @param {integer} target
# @return {integer}
def search(self, nums, target):
l, r = 0, len(nums) - 1
while l <= r:
m = (l + r) / 2
if nums[m] == target:
return m
elif nums[m] > target:
if nums[m] > nums[r] and target < nums[l]:
l = m + 1
else:
r = m - 1
else:
if nums[m] < nums[r] and target > nums[r]:
r = m - 1
else:
l = m + 1
return -1
| mit | Python |
|
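A quick check of the rotated-array search above on the classic LeetCode example:

s = Solution()
assert s.search([4, 5, 6, 7, 0, 1, 2], 0) == 4   # found at index 4
assert s.search([4, 5, 6, 7, 0, 1, 2], 3) == -1  # absent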
b57c24b23fa9566178455da895ea63baf6e16ff4 | Test cases to verify parsing of bitwise encoded PIDs | corbinbs/shadetree,s-s-boika/obdlib,QualiApps/obdlib,QualiApps/obdlib,s-s-boika/obdlib | tests/scanner_tests.py | tests/scanner_tests.py | from shadetree.obd.scanner import decode_bitwise_pids
DURANGO_SUPPORTED_PIDS_RESPONSE = 'BE 3E B8 10 '
JETTA_DIESEL_SUPPORTED_PIDS_RESPONSE = '98 3B 80 19 '
def test_decode_bitwise_pids_durango():
"""
Verify we correctly parse information about supported PIDs on a 1999 Dodge Durango
"""
supported_pids = decode_bitwise_pids(DURANGO_SUPPORTED_PIDS_RESPONSE)
assert supported_pids == {
'01': True,
'02': False,
'03': True,
'04': True,
'05': True,
'06': True,
'07': True,
'08': False,
'09': False,
'0A': False,
'0B': True,
'0C': True,
'0D': True,
'0E': True,
'0F': True,
'10': False,
'11': True,
'12': False,
'13': True,
'14': True,
'15': True,
'16': False,
'17': False,
'18': False,
'19': False,
'1A': False,
'1B': False,
'1C': True,
'1D': False,
'1E': False,
'1F': False,
'20': False
}
def test_decode_bitwise_pids_jetta_diesel():
"""
Verify we correctly parse information about supported PIDs on a 2004 Jetta Diesel Wagon
"""
supported_pids = decode_bitwise_pids(JETTA_DIESEL_SUPPORTED_PIDS_RESPONSE)
assert supported_pids == {
'01': True,
'02': False,
'03': False,
'04': True,
'05': True,
'06': False,
'07': False,
'08': False,
'09': False,
'0A': False,
'0B': True,
'0C': True,
'0D': True,
'0E': False,
'0F': True,
'10': True,
'11': True,
'12': False,
'13': False,
'14': False,
'15': False,
'16': False,
'17': False,
'18': False,
'19': False,
'1A': False,
'1B': False,
'1C': True,
'1D': True,
'1E': False,
'1F': False,
'20': True
} | mit | Python |
|
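A worked sketch of the bit layout these tests exercise: each hex byte contributes eight PID flags, most significant bit first. Assuming decode_bitwise_pids follows that layout, the first byte of the Durango response decodes as:

byte = int('BE', 16)  # 0b10111110
flags = [bool(byte & (1 << (7 - i))) for i in range(8)]
# flags -> [True, False, True, True, True, True, True, False]
# i.e. PIDs 01, 03, 04, 05, 06, 07 supported; 02 and 08 not,
# matching the expected dict in the Durango test.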
7a9bb7d412ccfa4921dc691232c1192bbb2789cd | Add rudimentary swarming service. | benschmaus/catapult,benschmaus/catapult,catapult-project/catapult-csm,sahiljain/catapult,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,benschmaus/catapult,sahiljain/catapult,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult-csm,benschmaus/catapult,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult | dashboard/dashboard/services/swarming_service.py | dashboard/dashboard/services/swarming_service.py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for interfacing with the Chromium Swarming Server.
The Swarming Server is a task distribution service. It can be used to kick off
a test run.
API explorer: https://goo.gl/uxPUZo
"""
# TODO(dtu): This module is very much a work in progress. It's not clear whether
# the parameters are the right ones to pass, whether it's the right way to pass
# the parameters (as opposed to having a data object, whether the functions
# should be encapsulated in the data object, or whether this is at the right
# abstraction level.
from apiclient import discovery
from dashboard import utils
_DISCOVERY_URL = ('https://chromium-swarm.appspot.com/_ah/api'
'/discovery/v1/apis/{api}/{apiVersion}/rest')
def New(name, user, bot_id, isolated_hash, extra_args=None):
"""Create a new Swarming task."""
if not extra_args:
extra_args = []
swarming = _DiscoverService()
request = swarming.tasks().new(body={
'name': name,
'user': user,
'priority': '100',
'expiration_secs': '600',
'properties': {
'inputs_ref': {
'isolated': isolated_hash,
},
'extra_args': extra_args,
'dimensions': [
{'key': 'id', 'value': bot_id},
{'key': 'pool', 'value': 'Chrome-perf'},
],
'execution_timeout_secs': '3600',
'io_timeout_secs': '3600',
},
'tags': [
'id:%s-b1' % bot_id,
'pool:Chrome-perf',
],
})
return request.execute()
def Get(task_id):
del task_id
raise NotImplementedError()
def _DiscoverService():
return discovery.build('swarming', 'v1', discoveryServiceUrl=_DISCOVERY_URL,
http=utils.ServiceAccountHttp())
| bsd-3-clause | Python |
|
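A hedged usage sketch of the New() helper above, assuming the module is imported as swarming_service; the bot id, isolated hash, and extra args are placeholders, and per the Swarming API the response is expected to carry the created task's id:

result = swarming_service.New(
    name='telemetry-run',
    user='someone@example.com',
    bot_id='build1-b1',
    isolated_hash='f' * 40,
    extra_args=['--story-filter', 'load:search'])
print(result.get('task_id'))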
1a3839a083293200862ea21283c9c4d82a836846 | Add test for profiles. | Brown-University-Library/vivo-data-management,Brown-University-Library/vivo-data-management | tests/test_catalyst.py | tests/test_catalyst.py |
from vdm.catalyst import DisambiguationEngine
def pretty(raw):
"""
Pretty print xml.
"""
import xml.dom.minidom
xml = xml.dom.minidom.parseString(raw)
pretty = xml.toprettyxml()
return pretty
def test_profile():
#Basic info about a person.
p = [
'Josiah',
'Carberry',
None,
'jcarberry@brown.edu',
['null'],
['null']
]
disambig = DisambiguationEngine()
disambig.affiliation_strings = ['Sample University']
doc = disambig.build_doc(*p)
#Basic verification that XML contains what we expect.
assert('<First>Josiah</First>' in doc)
assert('<Last>Carberry</Last>' in doc)
assert('<email>jcarberry@brown.edu</email>' in doc)
assert('<Affiliation>%Sample University%</Affiliation>' in doc)
| mit | Python |
|
15b69945a209515c236d8ed788e824a895ef6859 | Create uvcontinuum.py | tiffanyhsyu/XMPs | xmps/color_selection/uvcontinuum.py | xmps/color_selection/uvcontinuum.py | bsd-3-clause | Python |
||
ba60687fec047ed94bf7bb76dcf8bcf485c705ec | Add script to repair member relations between organizations and packages. | etalab/etalab-ckan-scripts | repair_organizations_members.py | repair_organizations_members.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Etalab-CKAN-Scripts -- Various scripts that handle Etalab datasets in CKAN repository
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Emmanuel Raviart
# http://github.com/etalab/etalab-ckan-scripts
#
# This file is part of Etalab-CKAN-Scripts.
#
# Etalab-CKAN-Scripts is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalab-CKAN-Scripts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Repair members of organizations, to ensure that they match the owners of packages."""
import argparse
import logging
import os
import sys
from ckan import model, plugins
from ckan.config.environment import load_environment
from paste.deploy import appconfig
from paste.registry import Registry
import pylons
import sqlalchemy as sa
#import sqlalchemy.exc
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config', help = 'path of configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
# logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
logging.basicConfig(level = logging.INFO if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = appconfig('config:{}'.format(os.path.abspath(args.config)))
load_environment(site_conf.global_conf, site_conf.local_conf)
registry = Registry()
registry.prepare()
registry.register(pylons.translator, MockTranslator())
plugins.load('synchronous_search')
revision = model.repo.new_revision()
for package in model.Session.query(model.Package).filter(
model.Package.owner_org != None,
model.Package.state == 'active',
):
owner = model.Session.query(model.Group).get(package.owner_org)
assert owner is not None
assert owner.is_organization
assert owner.state != 'deleted'
member = model.Session.query(model.Member).filter(
model.Member.group_id == owner.id,
model.Member.state == 'active',
model.Member.table_id == package.id,
).first()
if member is None:
log.info(u'Repairing organization "{}" package "{}" membership'.format(owner.name, package.name))
member = model.Session.query(model.Member).filter(
model.Member.group_id == owner.id,
model.Member.table_id == package.id,
).first()
assert member is not None
if member.capacity != 'organization':
member.capacity = 'organization'
member.state = 'active'
assert member.table_name == 'package'
else:
if member.capacity != 'organization':
log.warning(u'Repairing capacity organization "{}" package "{}" membership'.format(owner, package))
member.capacity = 'organization'
assert member.table_name == 'package'
continue
model.repo.commit_and_remove()
return 0
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | Python |
|
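Typical invocation of the repair script above; the configuration path is deployment-specific:

python repair_organizations_members.py /etc/ckan/default/production.ini --verbose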
c6f09446076677e5a3af8fda8c7fbbb73885234f | Add Custom Filter Design demo | rclement/yodel,rclement/yodel | demo/custom_filter_design.py | demo/custom_filter_design.py | import yodel.analysis
import yodel.filter
import yodel.complex
import yodel.conversion
import matplotlib.pyplot as plt
def frequency_response(response):
size = len(response)
freq_response_real = [0] * size
freq_response_imag = [0] * size
fft = yodel.analysis.FFT(size)
fft.forward(response, freq_response_real, freq_response_imag)
return freq_response_real, freq_response_imag
def amplitude_response(spec_real, spec_imag, db=True):
size = len(spec_real)
amp = [0] * size
for i in range(0, size):
amp[i] = yodel.complex.modulus(spec_real[i], spec_imag[i])
if db:
amp[i] = yodel.conversion.lin2db(amp[i])
return amp
def phase_response(spec_real, spec_imag, degrees=True):
size = len(spec_real)
pha = [0] * size
for i in range(0, size):
pha[i] = yodel.complex.phase(spec_real[i], spec_imag[i])
if degrees:
pha[i] = (pha[i] * 180.0 / math.pi)
return pha
class CustomFilterDesigner:
def __init__(self):
self.samplerate = 48000
self.framesize = 256
self.frsize = int((self.framesize/2)+1)
self.custom_fr = [1] * self.frsize
self.hzscale = [(i*self.samplerate) / (2.0*self.frsize) for i in range(0, self.frsize)]
self.flt = yodel.filter.Custom(self.samplerate, self.framesize)
self.pressed = None
self.update_filter()
self.create_plot()
def update_filter(self):
self.flt.design(self.custom_fr, False)
fr_re, fr_im = frequency_response(self.flt.ir)
self.fft_fr = amplitude_response(fr_re, fr_im, False)
def create_plot(self):
self.fig = plt.figure()
self.cid = self.fig.canvas.mpl_connect('button_press_event', self.onpress)
self.cid = self.fig.canvas.mpl_connect('button_release_event', self.onrelease)
self.cid = self.fig.canvas.mpl_connect('motion_notify_event', self.onmotion)
self.ax_custom_fr = self.fig.add_subplot(111)
self.ax_custom_fr.set_title('Custom Filter Design')
self.plot_custom_fr, = self.ax_custom_fr.plot(self.hzscale, self.custom_fr, 'r', label='Desired Frequency Response')
self.plot_fft_fr, = self.ax_custom_fr.plot(self.hzscale, self.fft_fr[0:self.frsize], 'b', label='Actual Frequency Response')
self.ax_custom_fr.legend()
self.ax_custom_fr.grid()
self.rescale_plot()
def rescale_plot(self):
self.ax_custom_fr.set_ylim(-1, 5)
plt.draw()
def onpress(self, event):
if event.inaxes != self.ax_custom_fr:
return
self.pressed = (event.xdata, event.ydata)
xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
ypos = max(event.ydata, 0)
if xpos >= 0 and xpos < self.frsize:
self.custom_fr[xpos] = ypos
self.update_filter()
self.plot_custom_fr.set_ydata(self.custom_fr)
self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
self.rescale_plot()
def onrelease(self, event):
self.pressed = None
def onmotion(self, event):
if self.pressed != None and event.xdata != None and event.ydata != None:
xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
ypos = max(event.ydata, 0)
if xpos >= 0 and xpos < self.frsize:
self.custom_fr[xpos] = ypos
self.update_filter()
self.plot_custom_fr.set_ydata(self.custom_fr)
self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
self.rescale_plot()
cfd = CustomFilterDesigner()
plt.show()
| mit | Python |
|
4826764c24fca8204322f88adfde75968b3985ee | add wrapper to start bucky from source tree | trbs/bucky,JoseKilo/bucky,CollabNet/puppet-bucky,MrSecure/bucky2,ewdurbin/bucky,Hero1378/bucky,ewdurbin/bucky,dimrozakis/bucky,trbs/bucky,MrSecure/bucky2,jsiembida/bucky3,CollabNet/puppet-bucky,dimrozakis/bucky,CollabNet/puppet-bucky,JoseKilo/bucky,Hero1378/bucky,CollabNet/puppet-bucky | bucky.py | bucky.py | #!/usr/bin/env python
import bucky.main
if __name__ == '__main__':
bucky.main.main()
| apache-2.0 | Python |
|
c757c6ad714afb393c65c1b82bca31de357332fc | Add test coverage for utility module | lresende/toree-gateway,lresende/toree-gateway | python/util_test.py | python/util_test.py | #
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import unittest
import tempfile
import util
class TestUtils(unittest.TestCase):
def setUp(self):
""" capture stdout to a temp file """
self.tempFile = tempfile.TemporaryFile()
os.dup2(self.tempFile.fileno(), sys.stdout.fileno())
def tearDown(self):
""" remove temp file """
self.tempFile.close()
def test_output_is_clean_when_debug_is_disabled(self):
util.isDebugging = False
util.debug_print('Debug Message')
self.assertEqual(self._readOutput(), '', 'Should not write messages when debug is disabled')
def test_output_has_content_when_debug_is_enabled(self):
util.isDebugging = True
util.debug_print('Debug Message')
self.assertEqual(self._readOutput(), 'Debug Message', 'Should write messages when debug is enabled')
def test_output_has_content_when_byte_array_message_is_passed(self):
util.isDebugging = True
util.debug_print(b'Binary Debug Message')
self.assertEqual(self._readOutput(), 'Binary Debug Message', 'Should write messages when debug is enabled')
def _readOutput(self):
self.tempFile.seek(0)
return self.tempFile.read().decode().rstrip()
if __name__ == "__main__":
unittest.main()
| apache-2.0 | Python |
|
8ee2f2b4c3a0ac40c6b7582a2cf3724f30f41dae | Add data migration | shapiromatron/amy,shapiromatron/amy,vahtras/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy,wking/swc-amy,wking/swc-amy,vahtras/amy,shapiromatron/amy,swcarpentry/amy,swcarpentry/amy,wking/swc-amy,wking/swc-amy,swcarpentry/amy,vahtras/amy | workshops/migrations/0035_auto_20150107_1205.py | workshops/migrations/0035_auto_20150107_1205.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def copy_project_to_tags(apps, schema_editor):
Event = apps.get_model('workshops', 'Event')
for event in Event.objects.all().exclude(project=None):
tag = event.project
print('add {} to {}'.format(tag, event))
event.tags.add(tag)
event.save()
class Migration(migrations.Migration):
dependencies = [
('workshops', '0034_auto_20150107_1200'),
]
operations = [
migrations.RenameModel(
old_name='Project',
new_name='Tag',
),
migrations.AddField(
model_name='event',
name='tags',
field=models.ManyToManyField(to='workshops.Tag'),
preserve_default=True,
),
migrations.RunPython(copy_project_to_tags),
migrations.RemoveField(
model_name='event',
name='project',
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0034_auto_20150107_1200'),
]
operations = [
migrations.RenameModel(
old_name='Project',
new_name='Tag',
),
migrations.RemoveField(
model_name='event',
name='project',
),
migrations.AddField(
model_name='event',
name='tags',
field=models.ManyToManyField(to='workshops.Tag'),
preserve_default=True,
),
]
| mit | Python |
3ba67bf461f2f35f549cc2ac5c85dd1bfb39cfa4 | Add a collection of tests around move_or_merge.py | artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica | src/MCPClient/tests/test_move_or_merge.py | src/MCPClient/tests/test_move_or_merge.py | # -*- encoding: utf-8
import pytest
from .move_or_merge import move_or_merge
def test_move_or_merge_when_dst_doesnt_exist(tmpdir):
src = tmpdir.join("src.txt")
dst = tmpdir.join("dst.txt")
src.write("hello world")
move_or_merge(src=src, dst=dst)
assert not src.exists()
assert dst.exists()
assert dst.read() == "hello world"
def test_okay_if_dst_exists_and_is_same(tmpdir):
src = tmpdir.join("src.txt")
dst = tmpdir.join("dst.txt")
src.write("hello world")
dst.write("hello world")
move_or_merge(src=src, dst=dst)
assert not src.exists()
assert dst.exists()
assert dst.read() == "hello world"
def test_error_if_dst_exists_and_is_different(tmpdir):
src = tmpdir.join("src.txt")
dst = tmpdir.join("dst.txt")
src.write("hello world")
dst.write("we come in peace")
with pytest.raises(RuntimeError, match="dst exists and is different"):
move_or_merge(src=src, dst=dst)
# Check the original file wasn't deleted
assert src.exists()
assert dst.exists()
def test_moves_contents_of_directory(tmpdir):
src_dir = tmpdir.mkdir("src")
dst_dir = tmpdir.mkdir("dst")
src = src_dir.join("file.txt")
dst = dst_dir.join("file.txt")
src.write("hello world")
move_or_merge(src=str(src_dir), dst=str(dst_dir))
assert not src.exists()
assert dst.exists()
assert dst.read() == "hello world"
def test_moves_nested_directory(tmpdir):
src_dir = tmpdir.mkdir("src")
dst_dir = tmpdir.mkdir("dst")
src_nested = src_dir.mkdir("nested")
dst_nested = dst_dir.join("nested")
src = src_nested.join("file.txt")
dst = dst_nested.join("file.txt")
src.write("hello world")
move_or_merge(src=str(src_dir), dst=str(dst_dir))
assert not src.exists()
assert dst.exists()
assert dst.read() == "hello world"
def test_merges_nested_directory(tmpdir):
src_dir = tmpdir.mkdir("src")
dst_dir = tmpdir.mkdir("dst")
src_nested = src_dir.mkdir("nested")
# Unlike the previous test, we create the "nested" directory upfront,
# but we don't populate it.
dst_nested = dst_dir.mkdir("nested")
src = src_nested.join("file.txt")
dst = dst_nested.join("file.txt")
src.write("hello world")
move_or_merge(src=str(src_dir), dst=str(dst_dir))
assert not src.exists()
assert dst.exists()
assert dst.read() == "hello world"
def test_merges_nested_directory_with_existing_file(tmpdir):
src_dir = tmpdir.mkdir("src")
dst_dir = tmpdir.mkdir("dst")
src_nested = src_dir.mkdir("nested")
dst_nested = dst_dir.mkdir("nested")
src = src_nested.join("file.txt")
dst = dst_nested.join("file.txt")
src.write("hello world")
dst.write("hello world")
move_or_merge(src=str(src_dir), dst=str(dst_dir))
assert not src.exists()
assert dst.exists()
assert dst.read() == "hello world"
def test_merges_nested_directory_with_mismatched_existing_file(tmpdir):
src_dir = tmpdir.mkdir("src")
dst_dir = tmpdir.mkdir("dst")
src_nested = src_dir.mkdir("nested")
dst_nested = dst_dir.mkdir("nested")
src = src_nested.join("file.txt")
dst = dst_nested.join("file.txt")
src.write("hello world")
dst.write("we come in peace")
with pytest.raises(RuntimeError, match="dst exists and is different"):
move_or_merge(src=str(src_dir), dst=str(dst_dir))
def test_ignores_existing_files_in_dst(tmpdir):
src_dir = tmpdir.mkdir("src")
dst_dir = tmpdir.mkdir("dst")
dst_existing = dst_dir.join("philosophy.txt")
dst_existing.write("i think therefore i am")
src_dir.join("file.txt").write("hello world")
move_or_merge(src=str(src_dir), dst=str(dst_dir))
assert dst_existing.exists()
assert dst_existing.read() == "i think therefore i am"
| agpl-3.0 | Python |
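The move_or_merge implementation under test is not included in this row; a minimal sketch that would satisfy these tests, assuming byte-level comparison via filecmp, might look like:

import filecmp
import os
import shutil

def move_or_merge(src, dst):
    src, dst = str(src), str(dst)
    if os.path.isdir(src):
        if not os.path.isdir(dst):
            os.makedirs(dst)
        for name in os.listdir(src):
            move_or_merge(os.path.join(src, name), os.path.join(dst, name))
        os.rmdir(src)  # only reached once every child merged cleanly
    elif not os.path.exists(dst):
        shutil.move(src, dst)
    elif filecmp.cmp(src, dst, shallow=False):
        os.remove(src)  # identical content: drop the duplicate
    else:
        raise RuntimeError('dst exists and is different')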