hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f1f067f028748782da40d03c616d1804024a0dea | 3,271 | py | Python | tests/models/classifiers/test_logistic.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | null | null | null | tests/models/classifiers/test_logistic.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | null | null | null | tests/models/classifiers/test_logistic.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | 2 | 2020-04-02T00:58:24.000Z | 2021-11-16T13:30:30.000Z | import pytest
# External imports
import numpy as np
from gpmap import GenotypePhenotypeMap
# Module to test
import epistasis
from epistasis.models.classifiers import *
THRESHOLD = 0.2
| 27.258333 | 60 | 0.61174 |
f1f07ae2628711dd9b1d256f9780dc722c6f8e53 | 381 | py | Python | backend/notifications/admin.py | solitariaa/CMPUT404-project-socialdistribution | f9e23a10e209f8bf7ed062e105f44038751f7c74 | [
"W3C-20150513"
] | 1 | 2022-03-01T03:03:40.000Z | 2022-03-01T03:03:40.000Z | backend/notifications/admin.py | solitariaa/CMPUT404-project-socialdistribution | f9e23a10e209f8bf7ed062e105f44038751f7c74 | [
"W3C-20150513"
] | 51 | 2022-02-09T06:18:27.000Z | 2022-03-28T19:01:54.000Z | backend/notifications/admin.py | solitariaa/CMPUT404-project-socialdistribution | f9e23a10e209f8bf7ed062e105f44038751f7c74 | [
"W3C-20150513"
] | 2 | 2022-03-13T20:58:10.000Z | 2022-03-19T06:29:56.000Z | from django.contrib import admin
from .models import Notification

# NOTE(review): NotificationAdmin is neither imported nor defined in this
# view -- presumably a ModelAdmin subclass defined elsewhere in the file.
# If it does not exist, this line raises NameError at import time; confirm.
admin.site.register(Notification, NotificationAdmin)
| 23.8125 | 63 | 0.727034 |
f1f2709a9b0d54f4549f4f5b2c964cce095a32f9 | 3,655 | py | Python | example/experiments/01_experiment.py | dzwiedziu-nkg/credo-classify-framework | 45417b505b4f4b20a7248f3487ca57a3fd49ccee | [
"MIT"
] | null | null | null | example/experiments/01_experiment.py | dzwiedziu-nkg/credo-classify-framework | 45417b505b4f4b20a7248f3487ca57a3fd49ccee | [
"MIT"
] | null | null | null | example/experiments/01_experiment.py | dzwiedziu-nkg/credo-classify-framework | 45417b505b4f4b20a7248f3487ca57a3fd49ccee | [
"MIT"
] | 3 | 2020-06-19T15:41:19.000Z | 2020-06-29T12:47:05.000Z | import bz2
import time
import urllib.request
import io
from typing import List, Tuple
from credo_cf import load_json_from_stream, progress_and_process_image, group_by_device_id, group_by_resolution, too_often, near_hot_pixel2, \
too_bright
from credo_cf import xor_preprocess
from credo_cf.commons.utils import get_and_add
# URL of the bz2-compressed working set downloaded by this experiment script.
WORKING_SET = 'http://mars.iti.pk.edu.pl/~nkg/credo/working_set.json.bz2'

# Accumulates timing measurements; keys/values are filled in elsewhere.
time_profile = {}


if __name__ == '__main__':
    # main() is defined elsewhere in this file (not visible in this chunk).
    main()
| 31.782609 | 142 | 0.642681 |
f1f2862dcb680020685252fc0444e7b7a36ac2b8 | 427 | py | Python | apptweak/ios.py | gudhati/apptweak-api-python-library | f4a7f7e34548d6d216f3a297d63944c7adbf9667 | [
"MIT"
] | 5 | 2019-05-21T14:44:57.000Z | 2020-10-30T04:08:13.000Z | apptweak/ios.py | gudhati/apptweak-api-python-library | f4a7f7e34548d6d216f3a297d63944c7adbf9667 | [
"MIT"
] | 1 | 2020-08-28T02:42:37.000Z | 2020-08-28T07:52:54.000Z | apptweak/ios.py | gudhati/apptweak-api-python-library | f4a7f7e34548d6d216f3a297d63944c7adbf9667 | [
"MIT"
] | 5 | 2019-07-18T13:38:01.000Z | 2021-06-09T04:12:35.000Z | from apptweak.plateform import *
| 26.6875 | 81 | 0.697892 |
f1f2f70605379c3a09598bf2b8739bb4f47caa1b | 3,944 | py | Python | 30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py | zhaobingwang/python-samples | d59f84d2b967cc793cb9b8999f8cdef349fd6fd5 | [
"MIT"
] | null | null | null | 30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py | zhaobingwang/python-samples | d59f84d2b967cc793cb9b8999f8cdef349fd6fd5 | [
"MIT"
] | null | null | null | 30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py | zhaobingwang/python-samples | d59f84d2b967cc793cb9b8999f8cdef349fd6fd5 | [
"MIT"
] | null | null | null | print('---------- Opening Files for Reading ----------')
# Tutorial section: the different ways of reading a text file.
f = open('./files/reading_file_example.txt')
print(f)  # <_io.TextIOWrapper name='./files/reading_file_example.txt' mode='r' encoding='cp936'>

print('\t---------- read() ----------')
# read(): read the whole text as string. If we want to limit the number of characters we read,
# we can limit it by passing int value to the methods.
f = open('./files/reading_file_example.txt')
txt = f.read()
print(type(txt))  # <class 'str'>
print(txt)  # Hello,Python!
f.close()

# read(5): read at most the first five characters.
f = open('./files/reading_file_example.txt')
txt = f.read(5)
print(type(txt))  # <class 'str'>
print(txt)  # Hello
f.close()

print('\t---------- readline(): read only the first line ----------')
f = open('./files/reading_file_example.txt')
line = f.readline()
print(type(line))  # <class 'str'>
print(line)  # Hello,Python!
f.close()

print('\t---------- readlines(): read all the text line by line and returns a list of lines ----------')
f = open('./files/reading_file_example.txt')
lines = f.readlines()
print(type(lines))  # <class 'list'>
print(lines)  # ['Hello,Python!']
f.close()

print('\t---------- splitlines() ----------')
# splitlines() differs from readlines() in that it strips the newline characters.
f = open('./files/reading_file_example.txt')
lines = f.read().splitlines()
print(type(lines))  # <class 'list'>
print(lines)  # ['Hello,Python!']
f.close()

print('\t---------- Another way to close a file ----------')
# The with-statement closes the file automatically when the block exits.
with open('./files/reading_file_example.txt') as f:
    lines = f.read().splitlines()
    print(type(lines))  # <class 'list'>
    print(lines)  # ['Hello,Python!']
print('---------- Opening Files for Writing and Updating ----------')
# To write to an existing file, we must add a mode as parameter to the open() function:
# "a" - append - will append to the end of the file, creating the file if it does not exist.
# "w" - write - will overwrite any existing content, creating the file if it does not exist.
with open('./files/writing_file_example.txt', 'a') as f:
    f.write('Hello,Python!')

with open('./files/writing_file_example.txt', 'w') as f:
    f.write('Hello,Java!')
print('---------- Deleting Files ----------')
import os
if os.path.exists('./files/writing_file_example.txt'):
    os.remove('./files/writing_file_example.txt')
else:
    # Bug fix: the original passed this message string to os.remove(), which
    # would try to delete a file literally named 'The file does not exist!'
    # and raise FileNotFoundError.  Report the situation instead.
    print('The file does not exist!')
print('---------- File Types ----------')

print('\t---------- File with json Extension ----------')
# A plain Python dictionary (not yet JSON).
person_dct = {
    "name": "Zhang San",
    "country": "China",
    "city": "Hangzhou",
    "skills": ["Java", "C#", "Python"]
}
# JSON: A string form a dictionary
person_json = "{'name': 'Zhang San', 'country': 'China', 'city': 'Hangzhou', 'skills': ['Java', 'C#', 'Python']}"

# we use three quotes and make it multiple line to make it more readable
person_json = '''{
    "name":"Zhang San",
    "country":"China",
    "city":"Hangzhou",
    "skills":["Java", "C#","Python"]
}'''

print('\t---------- Changing JSON to Dictionary ----------')
import json
person_json = '''{
    "name":"Zhang San",
    "country":"China",
    "city":"Hangzhou",
    "skills":["Java", "C#","Python"]
}'''
# json.loads() parses a JSON string into a Python dictionary.
person_dct = json.loads(person_json)
print(person_dct)
print(person_dct['name'])

print('\t---------- Changing Dictionary to JSON ----------')
person_dct = {
    "name": "Zhang San",
    "country": "China",
    "city": "Hangzhou",
    "skills": ["Java", "C#", "Python"]
}
person_json = json.dumps(person_dct, indent=4)  # indent could be 2, 4, 8. It beautifies the json
print(type(person_json))  # <class 'str'>
print(person_json)

print('\t---------- Saving as JSON File ----------')
person_dct = {
    "name": "Zhang San",
    "country": "China",
    "city": "Hangzhou",
    "skills": ["Java", "C#", "Python"]
}
# ensure_ascii=False keeps non-ASCII characters readable in the output file.
with open('./files/json_example.json', 'w', encoding='utf-8') as f:
    json.dump(person_dct, f, ensure_ascii=False, indent=4)

print('\t---------- File with csv Extension ----------')
import csv
# with open('./files/csv_example.csv') as f:
| 31.806452 | 113 | 0.606491 |
f1f6211abde32ba71ccaac35e7c39eb9935dfa7c | 2,491 | py | Python | data/grady-memorial-hospital/parse.py | Afellman/hospital-chargemaster | 1b87bc64d95d97c0538be7633f9e469e5db624e2 | [
"MIT"
] | 34 | 2019-01-18T00:15:58.000Z | 2022-03-26T15:01:08.000Z | data/grady-memorial-hospital/parse.py | wsheffel/hospital-chargemaster | b3473c798fd2f343f7f02c1e32496f9eea9fa94d | [
"MIT"
] | 8 | 2019-01-16T22:06:11.000Z | 2019-02-25T00:59:25.000Z | data/grady-memorial-hospital/parse.py | wsheffel/hospital-chargemaster | b3473c798fd2f343f7f02c1e32496f9eea9fa94d | [
"MIT"
] | 10 | 2019-02-20T14:58:16.000Z | 2021-11-22T21:57:04.000Z | #!/usr/bin/env python
import os
from glob import glob
import json
import pandas
import datetime
import sys
# Paths relative to this script's own directory.
here = os.path.dirname(os.path.abspath(__file__))
folder = os.path.basename(here)
latest = '%s/latest' % here
year = datetime.datetime.today().year
output_data = os.path.join(here, 'data-latest.tsv')
output_year = os.path.join(here, 'data-%s.tsv' % year)

# Function read zip into memory

# Don't continue if we don't have latest folder
if not os.path.exists(latest):
    print('%s does not have parsed data.' % folder)
    sys.exit(0)

# Don't continue if we don't have results.json
results_json = os.path.join(latest, 'records.json')
if not os.path.exists(results_json):
    print('%s does not have results.json' % folder)
    sys.exit(1)

with open(results_json, 'r') as filey:
    results = json.loads(filey.read())

# Output schema of the chargemaster table accumulated below.
columns = ['charge_code',
           'price',
           'description',
           'hospital_id',
           'filename',
           'charge_type']

df = pandas.DataFrame(columns=columns)
# Parse every record listed in records.json and accumulate rows into df.
for result in results:
    filename = os.path.join(latest, result['filename'])
    if not os.path.exists(filename):
        print('%s is not found in latest folder.' % filename)
        continue

    if os.stat(filename).st_size == 0:
        print('%s is empty, skipping.' % filename)
        continue

    contents = None
    if filename.endswith('txt'):
        # ['DESCRIPTION', 'Unnamed: 1', 'PRICE']
        contents = pandas.read_csv(filename)
        contents = contents.dropna(how='all')

    # Bug fix: non-.txt files were never parsed, so `contents` stayed None
    # and iterating it below raised AttributeError.  Skip them explicitly.
    if contents is None:
        print('%s has an unsupported extension, skipping.' % filename)
        continue

    print("Parsing %s" % filename)
    print(contents.head())

    # Update by row
    for row in contents.iterrows():
        idx = df.shape[0] + 1
        # strip currency formatting, e.g. "$1,234.00" -> "1234.00"
        # NOTE(review): assumes the PRICE column is read as strings -- confirm.
        price = row[1]['PRICE'].replace('$', '').replace(',', '').strip()
        entry = [None,                    # charge code
                 price,                   # price
                 row[1]["DESCRIPTION"],   # description
                 result['hospital_id'],   # hospital_id
                 result['filename'],      # filename
                 'standard']              # charge type
        df.loc[idx, :] = entry
# Remove empty rows
df = df.dropna(how='all')

# Save data! (both a "latest" copy and a per-year snapshot, tab-separated)
print(df.shape)
df.to_csv(output_data, sep='\t', index=False)
df.to_csv(output_year, sep='\t', index=False)
| 29.654762 | 75 | 0.583701 |
f1f62ac7868b351e283f53daaf44f5e2562dfc27 | 10,476 | py | Python | DeterministicParticleFlowControl/tests/test_pytorch_kernel.py | dimitra-maoutsa/DeterministicParticleFlowControl | 106bc9b01d7a4888e4ded18c5fb5a989fe672386 | [
"MIT"
] | 6 | 2021-12-13T14:30:31.000Z | 2022-01-24T07:54:57.000Z | DeterministicParticleFlowControl/tests/test_pytorch_kernel.py | dimitra-maoutsa/DeterministicParticleFlowControl | 106bc9b01d7a4888e4ded18c5fb5a989fe672386 | [
"MIT"
] | 10 | 2021-12-18T23:04:53.000Z | 2022-02-05T02:06:34.000Z | DeterministicParticleFlowControl/tests/test_pytorch_kernel.py | dimitra-maoutsa/DeterministicParticleFlowControl | 106bc9b01d7a4888e4ded18c5fb5a989fe672386 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 10 07:20:39 2022
@author: maout
"""
import numpy as np
from scipy.spatial.distance import cdist
import torch
#from score_function_estimators import my_cdist
from typing import Union
from torch.autograd import grad
#%% select available device
#%%
#%% numpy versions of kernels functions
def Knp(x, y, l, multil=False):
    """Numpy reference RBF (squared-exponential) kernel matrix.

    Parameters
    ----------
    x : ndarray, shape (N, dim)
        First set of points.
    y : ndarray, shape (M, dim)
        Second set of points.
    l : float or sequence of float
        Kernel lengthscale(s); when ``multil`` is True, one lengthscale per
        input dimension (ARD kernel).
    multil : bool, optional
        Use a separate lengthscale per dimension.

    Returns
    -------
    ndarray, shape (N, M)
        K[i, j] = prod_d exp(-(x[i,d]-y[j,d])^2 / (2 l_d^2)).
    """
    if multil:
        res = np.ones((x.shape[0], y.shape[0]))
        for ii in range(len(l)):
            # squared distances along dimension ii only
            # (the original pre-allocated a throwaway np.zeros here -- dead store)
            sqd = cdist(x[:, ii].reshape(-1, 1), y[:, ii].reshape(-1, 1),
                        metric='sqeuclidean')
            res = np.multiply(res, np.exp(-sqd / (2 * l[ii] * l[ii])))
        return res
    else:
        sqd = cdist(x, y, 'sqeuclidean')
        return np.exp(-0.5 * sqd / (l * l))


def grdx_K_all(x, y, l, multil=False):
    """Scaled-difference array for the RBF kernel w.r.t. its 1st argument.

    Returns ``redifs`` of shape (N, M, dim) with
    redifs[i, j, d] = (x[i,d] - y[j,d]) * K(x_i, y_j) / l_d^2,
    i.e. the negative of d k / d x_d; the sign convention matches the torch
    ``RBF.gradient_X`` implementation it is compared against below.
    """
    N, dim = x.shape
    M, _ = y.shape
    diffs = x[:, None] - y               # (N, M, dim) pairwise differences
    # The kernel matrix is loop-invariant; the original recomputed it once
    # per dimension -- compute it a single time instead.
    K = Knp(x, y, l, multil)
    redifs = np.zeros((N, M, dim))
    for ii in range(dim):
        scale = l[ii] * l[ii] if multil else l * l
        redifs[:, :, ii] = diffs[:, :, ii] * K / scale
    return redifs
#%%
# NOTE(review): set_device() and the RBF class are defined elsewhere in this
# file; their definitions are not visible in this chunk.
DEVICE = set_device()
dtype = torch.float
dim = 2
N = 3
M = 4
X = torch.randn(N, dim, device=DEVICE)
Z = torch.randn(M, dim, device=DEVICE)
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials

#%% test kernel evaluation with single lengthscale
lengthsc = 2
# pytorched evaluation of the kernel and its gradient
K_instance = RBF(length_scale=lengthsc, multil=False, device=DEVICE)  ##instance of kernel object - non-evaluated
if DEVICE=='cpu':
    Ktorch = K_instance.Kernel(X, Z).detach().numpy()
    gradK_torch = K_instance.gradient_X(X, Z).detach().numpy()
else:
    # GPU tensors must be moved to the cpu before conversion to numpy
    Ktorch = K_instance.Kernel(X, Z).cpu().detach().numpy()
    gradK_torch = K_instance.gradient_X(X, Z).cpu().detach().numpy()
# numpyed reference implementation (Knp / grdx_K_all above)
if DEVICE=='cpu':
    K_numpy = Knp(X.detach().numpy(), Z.detach().numpy(),l=lengthsc, multil=False).astype(np.float32)
    grad_K_numpy = grdx_K_all(X.detach().numpy(), Z.detach().numpy(), l=lengthsc, multil=False).astype(np.float32)
else:
    K_numpy = Knp(X.cpu().detach().numpy(), Z.cpu().detach().numpy(),l=lengthsc, multil=False).astype(np.float32)
    grad_K_numpy = grdx_K_all(X.cpu().detach().numpy(), Z.cpu().detach().numpy(), l=lengthsc, multil=False).astype(np.float32)
# the torch and numpy implementations must agree
np.testing.assert_allclose(Ktorch, K_numpy, rtol=1e-06)
np.testing.assert_allclose(gradK_torch, grad_K_numpy, rtol=1e-06)

#%% test kernel evaluation with multiple lengthscales
lengthsc = np.array([1,2])
# pytorched
if DEVICE=='cpu':
    K_instance2 = RBF(length_scale=lengthsc, multil=True, device=DEVICE)  ##instance of kernel object - non-evaluated
    Ktorch = K_instance2.Kernel(X, Z).detach().numpy()
    gradK_torch = K_instance2.gradient_X(X, Z).detach().numpy()
else:
    K_instance2 = RBF(length_scale=lengthsc, multil=True, device=DEVICE)  ##instance of kernel object - non-evaluated
    Ktorch = K_instance2.Kernel(X, Z).cpu().detach().numpy()
    gradK_torch = K_instance2.gradient_X(X, Z).cpu().detach().numpy()
# numpyed
if DEVICE=='cpu':
    K_numpy = Knp(X.detach().numpy(), Z.detach().numpy(),l=lengthsc, multil=True).astype(np.float32)
    grad_K_numpy = grdx_K_all(X.detach().numpy(), Z.detach().numpy(), l=lengthsc, multil=True).astype(np.float32)
else:
    K_numpy = Knp(X.cpu().detach().numpy(), Z.cpu().detach().numpy(),l=lengthsc, multil=True).astype(np.float32)
    grad_K_numpy = grdx_K_all(X.cpu().detach().numpy(), Z.cpu().detach().numpy(), l=lengthsc, multil=True).astype(np.float32)
np.testing.assert_allclose(Ktorch, K_numpy, rtol=1e-06)
np.testing.assert_allclose(gradK_torch, grad_K_numpy, rtol=1e-06) | 37.683453 | 180 | 0.612447 |
f1f6905a9916f479816181eeb443cb6b650cc61b | 11,075 | py | Python | components.py | zachgk/tfcomponents | 6c33349ab13549debfc9b347df795c82e38cfa73 | [
"MIT"
] | null | null | null | components.py | zachgk/tfcomponents | 6c33349ab13549debfc9b347df795c82e38cfa73 | [
"MIT"
] | null | null | null | components.py | zachgk/tfcomponents | 6c33349ab13549debfc9b347df795c82e38cfa73 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import tflearn
from tflearn import variables as vs
from tflearn import activations
from tflearn import initializations
from tflearn import losses
from tflearn import utils
# Default component configuration flags.
# NOTE(review): the Droppath keys presumably implement FractalNet-style
# drop-path regularisation (global vs. local sampling) -- confirm against
# the layer implementations elsewhere in this module.
componentInherit = {
    'globalDroppath': False,
    'localDroppath': False,
    'localDroppathProb': .5,
    'parentType': '',
    'currentType': ''
}
| 34.182099 | 116 | 0.557562 |
f1f9c0eee8a8c52481a3d1792850e6310a0a8163 | 1,984 | py | Python | tests/unit/warnings_test.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 421 | 2015-06-02T16:29:59.000Z | 2021-06-03T18:44:42.000Z | tests/unit/warnings_test.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 404 | 2015-06-02T20:23:42.000Z | 2019-08-21T16:59:41.000Z | tests/unit/warnings_test.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 16 | 2015-06-16T17:21:02.000Z | 2020-03-27T02:27:09.000Z | from ..testcases import DustyTestCase
from dusty.warnings import Warnings
| 45.090909 | 229 | 0.689516 |
f1fa3f6469623ef44f7b253d9c5da8307b330081 | 4,655 | py | Python | dndice.py | Ar4093/PythonUtils | fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3 | [
"MIT"
] | null | null | null | dndice.py | Ar4093/PythonUtils | fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3 | [
"MIT"
] | null | null | null | dndice.py | Ar4093/PythonUtils | fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3 | [
"MIT"
] | null | null | null | from random import randint
import re
# Supported formats:
# [A]dX[(L|H|K)n][.Y1[.Y2[...]]]
# A - number of dice
# X - number of sides of dice
# . - operation: allowed are + - * x /
# Ln/Hn/Kn - discard the Lowest n dice or Keep the Highest n dice. - will only apply the first of these, in order LHK
# Y1,Y2,... - operand
# warning: doesn't respect order of operations. So +5*3 will first add 5, then multiply by 3.
# example: 4d6+3 rolls 4 dice with 6 faces each, afterwards adds 3.
# Thanks to tara, maximum number of allowed dice/faces is 999.
# Parse a single dice roll
# Parse a whole expression.
#
# Format: dice1[+dice2[+dice3[...]]]
# dice1, dice2, dice3, ...: Any valid dice format as written in the randomDice function.
#
# Returns: The total of all rolls as integer, None if there was no valid dice notation found
| 34.738806 | 140 | 0.478195 |
f1fbbda465699c148d64aca8b6b9736f618761e2 | 2,471 | py | Python | cfg/configure_model.py | dadelani/sentiment-discovery | 0cbfc5f6345dacbf52f1f806a9e136a61ca35cf8 | [
"BSD-3-Clause"
] | 2 | 2019-04-24T08:23:54.000Z | 2020-06-24T10:25:34.000Z | cfg/configure_model.py | mikekestemont/sentiment-discovery | 84bf39846ddf6b099d99318214a013269b5b0e61 | [
"BSD-3-Clause"
] | null | null | null | cfg/configure_model.py | mikekestemont/sentiment-discovery | 84bf39846ddf6b099d99318214a013269b5b0e61 | [
"BSD-3-Clause"
] | 1 | 2019-03-23T08:07:33.000Z | 2019-03-23T08:07:33.000Z | import os
from sentiment_discovery.reparameterization import remove_weight_norm
from sentiment_discovery.model import make_model
def configure_model(parser):
    """add cmdline args for configuring models"""
    # (flag, options) pairs, registered in declaration order so the
    # generated --help output is unchanged.
    model_args = (
        ('-load_model', dict(default='',
            help="""a specific checkpoint file to load from experiment's model directory""")),
        ('-should_test', dict(action='store_true',
            help='whether to train or evaluate a model')),
        ('-model_dir', dict(default='model',
            help='directory where models are saved to/loaded from')),
        ('-rnn_type', dict(default='mlstm',
            help='mlstm, lstm or gru')),
        ('-layers', dict(type=int, default=1,
            help='Number of layers in the rnn')),
        ('-rnn_size', dict(type=int, default=4096,
            help='Size of hidden states')),
        ('-embed_size', dict(type=int, default=64,
            help='Size of embeddings')),
        ('-weight_norm', dict(action='store_true',
            help='whether to use weight normalization for training NNs')),
        ('-lstm_only', dict(action='store_true',
            help='if `-weight_norm` is applied to the model, apply it to the lstm parmeters only')),
        ('-dropout', dict(type=float, default=0.1,
            help='Dropout probability.')),
    )
    for flag, options in model_args:
        parser.add_argument(flag, **options)
    # ModuleConfig is defined elsewhere in this module.
    return ModuleConfig(parser)
| 39.854839 | 93 | 0.718737 |
f1fcac439aa33bb2b7ada9c60628d61b4b1afd6c | 4,309 | py | Python | tests/backends/console/test_env.py | j5api/j5 | d3158cfd3d0d19ed33aba0c5c2c1f17a38fe12c7 | [
"MIT"
] | 10 | 2019-01-19T13:09:37.000Z | 2021-06-18T13:40:10.000Z | tests/backends/console/test_env.py | j5api/j5 | d3158cfd3d0d19ed33aba0c5c2c1f17a38fe12c7 | [
"MIT"
] | 681 | 2019-01-22T18:12:23.000Z | 2022-03-25T14:14:31.000Z | tests/backends/console/test_env.py | j5api/j5 | d3158cfd3d0d19ed33aba0c5c2c1f17a38fe12c7 | [
"MIT"
] | 8 | 2019-02-22T21:45:47.000Z | 2021-11-17T19:43:33.000Z | """Tests for the ConsoleEnvironment and Console helper."""
from j5.backends.console import Console
def test_console_instantiation() -> None:
    """Test that we can create a console."""
    console = Console("MockConsole")
    assert type(console) is Console
    assert console._descriptor == "MockConsole"


# NOTE(review): MockPrintConsole, MockInputConsole, ConsoleNone,
# MockConsoleWithState and MockConsoleBoolean are helper subclasses defined
# elsewhere in this test module (not visible in this chunk).
def test_console_info() -> None:
    """Test that the console can output information."""
    console = MockPrintConsole("TestBoard")
    console.info("Test the console info")


def test_console_read() -> None:
    """Test that we can read from the console."""
    console = MockInputConsole("TestBoard")
    # NOTE(review): str(reversed(...)) yields a "<reversed object at 0x...>"
    # repr; presumably the mock makes both sides produce matching strings --
    # verify against MockInputConsole's implementation.
    assert str(console.read("Enter Test Input")) == str(reversed("Enter Test Input"))


def test_console_read_none_type() -> None:
    """Test that we can read None from console, i.e any input."""
    console = ConsoleNone("TestBoard")
    assert console.read("Enter test input", None) is None


def test_console_read_bad_type() -> None:
    """Test that the console emits an error if it cannot cast to the desired type."""
    console = MockConsoleWithState("TestConsole")
    assert console.read("I want an int", int) == 6


def test_console_handle_boolean_correctly() -> None:
    """Test that the console handles bools correctly."""
    console = MockConsoleBoolean("TestConsole")
    # every "truthy" spelling must parse to True
    for _ in MockConsoleBoolean.true_cases:
        val = console.read("I want an bool", bool, check_stdin=False)
        assert isinstance(val, bool)
        assert val
    # every "falsy" spelling must parse to False
    for _ in MockConsoleBoolean.false_cases:
        val = console.read("I want an bool", bool, check_stdin=False)
        assert isinstance(val, bool)
        assert not val
    # Test if false inputs are skipped.
    val = console.read("I want an bool", bool, check_stdin=False)
    assert isinstance(val, bool)
    assert val
    assert console.is_finished
| 31.918519 | 89 | 0.598283 |
f1ff198ad462185fb2910c252e87000aebf824f5 | 6,351 | py | Python | backend/modules/cache.py | fheyen/ClaVis | 528ca85dd05606d39761b5a00d755500cf1cd2f6 | [
"MIT"
] | 2 | 2021-01-11T20:09:32.000Z | 2021-05-14T14:52:48.000Z | backend/modules/cache.py | fheyen/ClaVis | 528ca85dd05606d39761b5a00d755500cf1cd2f6 | [
"MIT"
] | null | null | null | backend/modules/cache.py | fheyen/ClaVis | 528ca85dd05606d39761b5a00d755500cf1cd2f6 | [
"MIT"
] | null | null | null | from os import listdir, remove, makedirs
from os.path import isfile, join, exists
import shutil
import joblib
from termcolor import cprint
import json
from pathlib import Path
_cache_path = None
_log_actions = True
def init(cache_path, log_actions=True):
    """
    Initializes the cache.

    Keyword Arguments:
    - cache_path: directory where cached files are saved
    - log_actions: when true, all actions are logged
    """
    global _cache_path, _log_actions
    _log_actions = log_actions
    _cache_path = cache_path
    try:
        # exist_ok avoids the race between a separate exists() check and
        # makedirs() (the directory could appear in between).
        makedirs(cache_path, exist_ok=True)
    except OSError as e:
        # keep the original best-effort behaviour: report but do not raise
        cprint(e, 'red')
def write(filename: str, data) -> None:
    """
    Pickles a file and writes it to the cache.

    Keyword Arguments:
    - filename: name of the file to write to
    - data: object to cache
    """
    if _log_actions:
        cprint('Writing to cache: "{}"'.format(filename), 'green')
    # joblib handles the pickling (efficient for large numpy payloads)
    joblib.dump(data, join(_cache_path, filename))


def write_plain(filename: str, data: str, add_extension: bool = True) -> None:
    """
    Simply writes the textual data to a file.

    Keyword Arguments:
    - filename: name of the file to write to ('.json' appended when
      add_extension is True)
    - data: text to write verbatim
    """
    if _log_actions:
        cprint('Writing to cache (plain): "{}"'.format(filename), 'green')
    if add_extension:
        filename += '.json'
    with open(join(_cache_path, filename), 'w') as f:
        f.write(data)


def write_dict_json(filename: str, data: dict, add_extension: bool = True) -> None:
    """
    Writes a dictionary to file using JSON format.

    Keyword Arguments:
    - filename: name of the file to write to ('.json' appended when
      add_extension is True)
    - data: JSON-serializable dictionary
    """
    if _log_actions:
        cprint('Writing to cache (json): "{}"'.format(filename), 'green')
    json_string = json.dumps(data, sort_keys=False, indent=4)
    if add_extension:
        filename += '.json'
    with open(join(_cache_path, filename), 'w') as f:
        f.write(json_string)
def read(filename: str):
    """
    Reads a file from the cache and unpickles it.

    Keyword Arguments:
    - filename: name of the file to read

    Returns:
    - data: unpickled object
    """
    if _log_actions:
        cprint('Loading from cache: "{}"'.format(filename), 'green')
    return joblib.load(join(_cache_path, filename))


def read_multiple(filenames):
    """
    Reads multiple files from the cache and unpickles them.

    Keyword Arguments:
    - filenames: names of the files to read

    Returns:
    - result: list of unpickled objects (successful loads only)
    - success_files: list of successful filenames
    - errors: filenames for which exceptions happened
    """
    result = []
    success_files = []
    errors = []
    for f in filenames:
        try:
            result.append(read(f))
            success_files.append(f)
        except Exception as e:
            # a failed load is reported and recorded, but does not abort
            # the remaining loads
            cprint(f'Loading {f} failed!', 'red')
            cprint(e, 'red')
            errors.append(f)
    return result, success_files, errors


def read_plain(filename: str) -> str:
    """
    Reads a file from the cache as plain text (no unpickling).

    Keyword Arguments:
    - filename: name of the file to read

    Returns:
    - the file's textual content
    """
    if _log_actions:
        cprint('Loading from cache: "{}"'.format(filename), 'green')
    return Path(join(_cache_path, filename)).read_text()
def delete(filename):
    """
    Removes all files from the cache that have names starting with filename.

    Keyword Arguments:
    - filename: prefix of the cache entries to remove

    Returns:
    - dict with 'type' ('success'/'error') and a human-readable 'msg'
    """
    deleted = 0
    errors = 0
    for f in entries():
        try:
            if f.startswith(filename):
                remove(join(_cache_path, f))
                deleted += 1
        except OSError:
            # Bug fix: the original f-strings had no placeholders and
            # printed the literal text "(unknown)" -- report the actual
            # file, matching delete_all_clf_projs() below.
            cprint(f'Cannot remove from cache: {f}', 'red')
            errors += 1
    cprint(f'Removed from cache all files starting with {filename}', 'green')
    msg = f'Removed {deleted} files, {errors} errors'
    cprint(msg, 'yellow')
    return {
        'type': 'success' if errors == 0 else 'error',
        'msg': msg
    }
def delete_all_clf_projs():
    """
    Deletes all cached classifier projections.

    Returns:
    - dict with 'type' ('success'/'error') and a human-readable 'msg'
    """
    deleted = 0
    errors = 0
    for f in entries():
        try:
            # classifier-projection artifacts carry this marker in their name
            if '__clf_proj_' in f:
                remove(join(_cache_path, f))
                deleted += 1
        except:  # noqa: E722 -- keep going past files that cannot be removed
            cprint(f'Cannot remove from cache: {f}', 'red')
            errors += 1
    cprint(f'Removed from cache all classifier projections', 'green')
    msg = f'Removed {deleted} files, {errors} errors'
    cprint(msg, 'yellow')
    return {
        'type': 'success' if errors == 0 else 'error',
        'msg': msg
    }
def clear() -> None:
    """
    Deletes the cache.
    """
    cprint('Clearing cache', 'yellow')
    # ignore_errors: clearing is best-effort, a missing dir is fine
    shutil.rmtree(_cache_path, ignore_errors=True)


def entries() -> list:
    """
    Lists all files in the cache.

    Returns:
    - list of all file names in the cache directory
    """
    return [f for f in listdir(_cache_path) if isfile(join(_cache_path, f))]
def content() -> dict:
    """
    Returns all .json files in the cache to allow showing what
    classifiers etc. have been trained so far.

    Entries are classified by markers in their filename:
    '__proj_' -> projections, '__clf_proj_' -> classifier projections,
    '__clf_' -> classifiers (scores attached), 'data_' prefix -> datasets.

    Returns:
    - a dictionary containing all files' contents
    """
    cached_files = entries()
    # only the *_args.json descriptors are indexed
    json_files = [f for f in cached_files if f.endswith('_args.json')]
    datasets = []
    classifiers = []
    projections = []
    classifier_projections = []
    for f in json_files:
        try:
            filepath = join(_cache_path, f)
            contents = Path(filepath).read_text()
            json_dict = {
                'file': f,
                'args': json.loads(contents)
            }
            if '__proj_' in f:
                projections.append(json_dict)
            elif '__clf_proj_' in f:
                classifier_projections.append(json_dict)
            elif '__clf_' in f:
                # send scores for cached classifications
                score_file = f.replace('_args.json', '_scores.json')
                scores = Path(join(_cache_path, score_file)).read_text()
                json_dict['scores'] = json.loads(scores)
                classifiers.append(json_dict)
            elif f.startswith('data_'):
                datasets.append(json_dict)
        except Exception as e:
            # a missing companion file (e.g. *_scores.json) skips the entry
            cprint(
                f'Error: Some related files may be missing for file {f}, check if you copied files correctly or run you jobs again!', 'red')
            cprint(e, 'red')
    return {
        'datasets': datasets,
        'classifiers': classifiers,
        'projections': projections,
        'classifier_projections': classifier_projections
    }
| 26.352697 | 140 | 0.597859 |
7b00a8aae5f5c462bd8742df1743968940cbb675 | 8,123 | py | Python | training/data/sampler.py | jpjuvo/PANDA-challenge-raehmae | 5748cd23f18e2dd36d56918dcee495b822d2a5cd | [
"MIT"
] | null | null | null | training/data/sampler.py | jpjuvo/PANDA-challenge-raehmae | 5748cd23f18e2dd36d56918dcee495b822d2a5cd | [
"MIT"
] | null | null | null | training/data/sampler.py | jpjuvo/PANDA-challenge-raehmae | 5748cd23f18e2dd36d56918dcee495b822d2a5cd | [
"MIT"
] | 1 | 2021-04-20T04:37:47.000Z | 2021-04-20T04:37:47.000Z | import torch
import os
import numpy as np
import random
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from data.tileimages import *
from data.multitask import *
import fastai
from fastai.vision import *
| 40.819095 | 111 | 0.519266 |
7b0127f18652a5554693ea5f44876da7eca25e09 | 281 | py | Python | ABC/194/a.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | ABC/194/a.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | ABC/194/a.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # main() is defined elsewhere in this file (not visible in this chunk)
    main()
| 14.05 | 36 | 0.441281 |
7b0281efeed9226063f79960fa17b68b47603613 | 2,578 | py | Python | test/graph/test_from_ase.py | yhtang/GraphDot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
] | 9 | 2020-02-14T18:07:39.000Z | 2021-12-15T12:07:31.000Z | test/graph/test_from_ase.py | yhtang/graphdot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
] | 3 | 2020-03-19T19:07:26.000Z | 2021-02-24T06:08:51.000Z | test/graph/test_from_ase.py | yhtang/graphdot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-10-17T06:11:18.000Z | 2021-05-07T11:56:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from ase.build import molecule
from ase.lattice.cubic import SimpleCubic
from graphdot.graph import Graph
from graphdot.graph.adjacency import AtomicAdjacency
# Parametrized AtomicAdjacency fixtures exercising the different edge-weight
# shapes and length-scale choices supported by graphdot.
adjacencies = [
    AtomicAdjacency(shape='tent1', length_scale=1.0, zoom=1),
    AtomicAdjacency(shape='tent2', length_scale='vdw_radius', zoom=1),
    AtomicAdjacency(
        shape='gaussian', length_scale='covalent_radius_pyykko', zoom=1.5
    ),
    AtomicAdjacency(shape='compactbell3,2'),
]
| 38.477612 | 79 | 0.660978 |
7b02e549c87583bcf554b71f024544d0bb0dac0a | 2,735 | py | Python | FEM/src/FemIo.py | BartSiwek/Neurotransmitter2D | 200c1b7e74de0786b1bb52d456e227f9d64cebc6 | [
"MIT"
] | null | null | null | FEM/src/FemIo.py | BartSiwek/Neurotransmitter2D | 200c1b7e74de0786b1bb52d456e227f9d64cebc6 | [
"MIT"
] | null | null | null | FEM/src/FemIo.py | BartSiwek/Neurotransmitter2D | 200c1b7e74de0786b1bb52d456e227f9d64cebc6 | [
"MIT"
] | null | null | null | import string
import scipy
import PslgIo, ElementAwarePslg | 27.35 | 102 | 0.571115 |
7b04376d12aae979563b6b36b34ff0b76d2dcff0 | 3,466 | py | Python | dianna/__init__.py | cffbots/dianna | 21e272dce2862747a5109341b622798f667d9248 | [
"Apache-2.0"
] | null | null | null | dianna/__init__.py | cffbots/dianna | 21e272dce2862747a5109341b622798f667d9248 | [
"Apache-2.0"
] | null | null | null | dianna/__init__.py | cffbots/dianna | 21e272dce2862747a5109341b622798f667d9248 | [
"Apache-2.0"
] | null | null | null | """
DIANNA: Deep Insight And Neural Network Analysis.
Modern scientific challenges are often tackled with (Deep) Neural Networks (DNN).
Despite their high predictive accuracy, DNNs lack inherent explainability. Many DNN
users, especially scientists, do not harvest DNNs power because of lack of trust and
understanding of their working.
Meanwhile, the eXplainable AI (XAI) methods offer some post-hoc interpretability and
insight into the DNN reasoning. This is done by quantifying the relevance of individual
features (image pixels, words in text, etc.) with respect to the prediction. These
"relevance heatmaps" indicate how the network has reached its decision directly in the
input modality (images, text, speech etc.) of the data.
There are many Open Source Software (OSS) implementations of these methods, alas,
supporting a single DNN format and the libraries are known mostly by the AI experts.
The DIANNA library supports the best XAI methods in the context of scientific usage
providing their OSS implementation based on the ONNX standard and demonstrations on
benchmark datasets. Representing visually the captured knowledge by the AI system can
become a source of (scientific) insights.
See https://github.com/dianna-ai/dianna
"""
import logging
from onnx_tf.backend import prepare # To avoid Access Violation on Windows with SHAP
from . import methods
from . import utils
logging.getLogger(__name__).addHandler(logging.NullHandler())
__author__ = "DIANNA Team"
__email__ = "dianna-ai@esciencecenter.nl"
__version__ = "0.2.1"
def explain_image(model_or_function, input_data, method, labels=(1,), **kwargs):
    """Run the chosen XAI method on a single image and return its heatmaps.

    Args:
        model_or_function (callable or str): The function that runs the model
            to be explained _or_ the path to a ONNX model on disk.
        input_data (np.ndarray): Image data to be explained
        method (string): One of the supported methods: RISE, LIME or KernelSHAP
        labels (tuple): Labels to be explained

    Returns:
        One heatmap (2D array) per class.
    """
    explainer = _get_explainer(method, kwargs)
    # Forward only the keyword arguments the explainer actually accepts.
    accepted_kwargs = utils.get_kwargs_applicable_to_function(explainer.explain_image, kwargs)
    return explainer.explain_image(model_or_function, input_data, labels, **accepted_kwargs)
def explain_text(model_or_function, input_data, method, labels=(1,), **kwargs):
    """Run the chosen XAI method on a text sample and return word relevances.

    Args:
        model_or_function (callable or str): The function that runs the model
            to be explained _or_ the path to a ONNX model on disk.
        input_data (string): Text to be explained
        method (string): One of the supported methods: RISE or LIME
        labels (tuple): Labels to be explained

    Returns:
        List of (word, index of word in raw text, importance for target class)
        tuples.
    """
    explainer = _get_explainer(method, kwargs)
    # Forward only the keyword arguments the explainer actually accepts.
    accepted_kwargs = utils.get_kwargs_applicable_to_function(explainer.explain_text, kwargs)
    return explainer.explain_text(model_or_function, input_data, labels, **accepted_kwargs)
| 42.790123 | 99 | 0.742643 |
7b0494a9e41efc09a0891a5e4ffe2bfd4e84d0d3 | 2,925 | py | Python | printer/gpio.py | 3DRPP/printer | 7826c7c82a5331d916d8ea038bd3a44aff6e35b5 | [
"MIT"
] | null | null | null | printer/gpio.py | 3DRPP/printer | 7826c7c82a5331d916d8ea038bd3a44aff6e35b5 | [
"MIT"
] | null | null | null | printer/gpio.py | 3DRPP/printer | 7826c7c82a5331d916d8ea038bd3a44aff6e35b5 | [
"MIT"
] | null | null | null | try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need "
"superuser privileges. You can achieve this by using 'sudo' to run "
"your script")
gpios = [7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29,
31, 32, 33, 35, 36, 37, 38, 40]
header = Header()
try:
GPIO.setmode(GPIO.BOARD)
for id in gpios:
print('Initializing gpio ' + str(id))
GPIO.setup(id, GPIO.OUT, initial=GPIO.LOW)
print('Initialized GPIOs')
except:
print('Could not set GPIO mode to BOARD.')
| 29.545455 | 79 | 0.523419 |
7b04e005435865593cbdccc3f6d9e91235157df4 | 1,395 | py | Python | simple_joint_subscriber/scripts/joint_subscriber.py | itk-thrivaldi/thrivaldi_examples | 7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd | [
"Apache-2.0"
] | null | null | null | simple_joint_subscriber/scripts/joint_subscriber.py | itk-thrivaldi/thrivaldi_examples | 7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd | [
"Apache-2.0"
] | 1 | 2017-12-14T14:04:24.000Z | 2017-12-14T16:58:05.000Z | simple_joint_subscriber/scripts/joint_subscriber.py | itk-thrivaldi/thrivaldi_examples | 7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import rospy # For all things ros with python
# JointState is defined in sensor_msgs.msg
# If you know a message but not where it is
# call rosmsg info MSGNAME from the terminal
from sensor_msgs.msg import JointState
# This tutorial takes heavily from
# http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber(python)
# In this example we make a simple subscriber that listens for JointState
# messages, and prints them. Uses a functional approach.
def message_callback(msg):
    """Handle one incoming JointState message by logging its positions."""
    rospy.loginfo("Joint position received:{}".format(msg.position))
def joint_listener():
    """Initialise the node, subscribe to /joint_states and block forever.

    Every JointState message published on the topic is handed to
    message_callback.
    """
    rospy.init_node("joint_listener", anonymous=True)
    # Register our interest in the topic with the ROS master.
    rospy.Subscriber("/joint_states", JointState, message_callback)
    # Hand control to ROS; this call only returns on shutdown.
    rospy.spin()
# If this script is run alone, not just imported:
if __name__ == "__main__":
joint_listener()
# Ensure that the python script is executable by running:
# chmod +x joint_subscriber.py
# Call this script by running:
# rosrun joint_subscriber joint_subscriber.py
| 34.875 | 79 | 0.7319 |
7b0521366a87b5722240ee07005b1b01f21cf17a | 1,291 | py | Python | src/lab4_cam/src/sawyercam.py | citronella3alain/baxterDraw | c050254e8b4b8d4f5087e8743a34289844138e0c | [
"MIT"
] | null | null | null | src/lab4_cam/src/sawyercam.py | citronella3alain/baxterDraw | c050254e8b4b8d4f5087e8743a34289844138e0c | [
"MIT"
] | null | null | null | src/lab4_cam/src/sawyercam.py | citronella3alain/baxterDraw | c050254e8b4b8d4f5087e8743a34289844138e0c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Aran Sena 2018
#
# Code example only, provided without guarantees
#
# Example for how to get both cameras streaming together
#
####
import rospy
from intera_core_msgs.srv._IOComponentCommandSrv import IOComponentCommandSrv
from intera_core_msgs.msg._IOComponentCommand import IOComponentCommand
if __name__ == '__main__':
    rospy.init_node('camera_command_client')
    # Enable both camera streams.  NOTE(review): camera_command_client is not
    # visible in this excerpt -- confirm it is defined earlier in the script;
    # it presumably issues an IOComponentCommand via the imported service type.
    camera_command_client(camera='head_camera', status=True)
    camera_command_client(camera='right_hand_camera', status=True)
| 33.102564 | 110 | 0.655306 |
7b061600468274d3cebd155c75fff8f1303d7256 | 12,279 | py | Python | citydata/crime.py | JackKirbyCook82/neighborhood | 3805fa11890e121ffadcaaf8f02323434cb68519 | [
"MIT"
] | null | null | null | citydata/crime.py | JackKirbyCook82/neighborhood | 3805fa11890e121ffadcaaf8f02323434cb68519 | [
"MIT"
] | null | null | null | citydata/crime.py | JackKirbyCook82/neighborhood | 3805fa11890e121ffadcaaf8f02323434cb68519 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun May 2 2021
@name: CityData CensusTract Download Application
@author: Jack Kirby Cook
"""
import sys
import os.path
import warnings
import logging
import regex as re
MAIN_DIR = os.path.dirname(os.path.realpath(__file__))
MODULE_DIR = os.path.abspath(os.path.join(MAIN_DIR, os.pardir))
ROOT_DIR = os.path.abspath(os.path.join(MODULE_DIR, os.pardir))
RESOURCE_DIR = os.path.join(ROOT_DIR, "resources")
SAVE_DIR = os.path.join(ROOT_DIR, "save")
DRIVER_FILE = os.path.join(RESOURCE_DIR, "chromedriver.exe")
REPOSITORY_DIR = os.path.join(SAVE_DIR, "citydata")
QUEUE_FILE = os.path.join(RESOURCE_DIR, "zipcodes.zip.csv")
REPORT_FILE = os.path.join(SAVE_DIR, "citydata", "censustracts.csv")
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
from utilities.iostream import InputParser
from utilities.dataframes import dataframe_parser
from webscraping.webtimers import WebDelayer
from webscraping.webdrivers import WebDriver
from webscraping.weburl import WebURL
from webscraping.webpages import WebBrowserPage
from webscraping.webpages import BadRequestError
from webscraping.webpages import WebContents
from webscraping.webloaders import WebLoader
from webscraping.webquerys import WebQuery, WebDatasets
from webscraping.webqueues import WebScheduler
from webscraping.webdownloaders import WebDownloader, CacheMixin, AttemptsMixin
from webscraping.webdata import WebClickable, WebText, WebInput, WebSelect
from webscraping.webactions import WebScroll, WebMoveTo, WebMoveToClick, WebMoveToClickSelect, WebMoveToClickFillSend
__version__ = "1.0.0"
__author__ = "Jack Kirby Cook"
__all__ = ["CityData_WebDelayer", "CityData_WebDownloader", "CityData_WebScheduler"]
__copyright__ = "Copyright 2021, Jack Kirby Cook"
__license__ = ""
LOGGER = logging.getLogger(__name__)
warnings.filterwarnings("ignore")
# Dataset keys mapped to the human-readable labels used by the site's
# map-layer selector.
DATASETS = {"violentcrime": "Crime - Violent crime index", "propertycrime": "Crime - Property crime index", "airpollution": "Air pollution - Air Quality Index (AQI)"}
# Census geography levels, ordered from coarsest to finest.
GEOGRAPHYS = ("state", "county", "tract", "blockgroup")

# XPath locators for the interactive map page (dataset selector, zipcode
# search box, legend and map controls).
dataset_select_xpath = r"//select[contains(@id, 'selmapOSM')]"
zipcode_click_xpath = r"//div[@id='searchOSM']//div[contains(@id, 'sboxOuter')]//b"
zipcode_input_xpath = r"//div[@id='searchOSM']//div[contains(@id, 'sboxOuter')]//input[contains(@id, 's2id')]"
zipcode_xpath = r"//div[@id='searchOSM']//div[contains(@id, 'sboxOuter')]//span[@class='select2-chosen']"
geography_xpath = r"//div[@id='legendBOX']/div[@id='mapOSM_legend']"
canvas_xpath = r"//div[@id='mapOSM']//canvas"
fullscreen_xpath = r"//div[@id='mapOSM']//a[@title='Full Screen']"
zoomin_xpath = r"//div[@id='mapOSM']//a[@title='Zoom in']"
zoomout_xpath = r"//div[@id='mapOSM']//a[@title='Zoom out']"

# One WebLoader per locator above.
dataset_select_webloader = WebLoader(xpath=dataset_select_xpath)
zipcode_click_webloader = WebLoader(xpath=zipcode_click_xpath)
zipcode_input_webloader = WebLoader(xpath=zipcode_input_xpath)
zipcode_webloader = WebLoader(xpath=zipcode_xpath)
geography_webloader = WebLoader(xpath=geography_xpath)
canvas_webloader = WebLoader(xpath=canvas_xpath)
fullscreen_webloader = WebLoader(xpath=fullscreen_xpath)
zoomin_webloader = WebLoader(xpath=zoomin_xpath)
zoomout_webloader = WebLoader(xpath=zoomout_xpath)

# Parsers for strings like "12345, CA": extract the 5-digit zipcode or the
# two-letter state; geography_parser maps the legend's "Displaying: ..." text
# to one of the GEOGRAPHYS keys.
zipcode_parser = lambda x: re.findall("^\d{5}(?=\, [A-Z]{2}$)", str(x).strip())[0]
state_parser = lambda x: re.findall("(?<=^\d{5}\, )[A-Z]{2}$", str(x).strip())[0]
geography_parser = lambda x: {"block groups": "blockgroup", "tracts": "tract", "counties": "county", "states": "state"}[re.findall("(?<=Displaying\: )[a-z ]+(?=\.)", str(x).strip())[0]]
# Named-group pattern extracting any geography component from a description
# such as "Census Tract 12.34, Some County, CA".
geography_pattern = "(?P<blockgroup>(?<=Census Block Group )[\.0-9]+)|(?P<tract>(?<=Census Tract )[\.0-9]+)|(?P<state>(?<=\, )[A-Z]{2}|(?<=\()[A-Z]{2}(?=\)))|(?P<county>[a-zA-Z ]+ County(?=\, ))"
def main(*args, **kwargs):
    """Build the scheduler/downloader pipeline and run the download queue."""
    pacing = CityData_WebDelayer("constant", wait=3)
    web_scheduler = CityData_WebScheduler(*args, file=REPORT_FILE, **kwargs)
    web_downloader = CityData_WebDownloader(*args, repository=REPOSITORY_DIR, **kwargs)
    pending = web_scheduler(*args, **kwargs)
    web_downloader(*args, queue=pending, delayer=pacing, **kwargs)
    LOGGER.info(str(web_downloader))
    for outcome in web_downloader.results:
        LOGGER.info(str(outcome))
    # A falsy downloader signals a recorded failure; surface it to the caller.
    if not web_downloader:
        raise web_downloader.error
if __name__ == "__main__":
    # Hard-coded demo arguments appended to whatever was passed on the CLI.
    sys.argv += ["state=CA", "city=Bakersfield", "dataset=violentcrime", "geography=tract"]
    logging.basicConfig(level="INFO", format="[%(levelname)s, %(threadName)s]: %(message)s")
    # 'assign' splits key=value pairs; NOTE(review): 'space': '_' presumably
    # lets underscores stand in for spaces inside values -- confirm against
    # the InputParser implementation.
    inputparser = InputParser(proxys={"assign": "=", "space": "_"}, parsers={}, default=str)
    inputparser(*sys.argv[1:])
    main(*inputparser.arguments, **inputparser.parameters)
| 47.964844 | 197 | 0.710807 |
7b072a958ac36c49b32339e29f7e4de28848fadd | 3,644 | py | Python | apportionpy/experimental/boundary.py | btror/apportionpy | 5b70dbeee4b197e41794bed061ea4a11f128d1c8 | [
"MIT"
] | null | null | null | apportionpy/experimental/boundary.py | btror/apportionpy | 5b70dbeee4b197e41794bed061ea4a11f128d1c8 | [
"MIT"
] | null | null | null | apportionpy/experimental/boundary.py | btror/apportionpy | 5b70dbeee4b197e41794bed061ea4a11f128d1c8 | [
"MIT"
] | null | null | null | import math
def estimate_lowest_divisor(method, divisor, populations, seats):
    """
    Calculates the estimated lowest possible divisor.

    :param method: The method used ("ADAM", "WEBSTER" or "JEFFERSON").
    :type method: str
    :param divisor: A working divisor in calculating fair shares.
    :type divisor: float
    :param populations: The populations for each state respectively.
    :type populations: [float]
    :param seats: The amount of seats to apportion.
    :type seats: int
    :return: An estimation of the lowest possible divisor.
    """
    # The number of states to apportion to.  (Bug fix: this was
    # sum(populations), which mis-sized the work lists below and raised a
    # TypeError whenever the populations were floats.)
    states = len(populations)

    # Initialize lists for fair shares and quotas.
    quotas = [0] * states
    fair_shares = [0] * states

    # Keep track of the previous divisor calculated and lowest of them.
    prev_divisor = 0
    lowest_divisor = 0

    # Estimator to use in predicting divisors; shrinks by 10x on each miss.
    estimator = 1000000000

    counter = 0
    while counter < 1000:
        # Compute each state's quota and round it per the chosen method.
        for i, population in enumerate(populations):
            if divisor is None or population is None:
                return None
            quotas[i] = population / divisor
            if method.upper() == "ADAM":
                fair_shares[i] = math.ceil(quotas[i])
            elif method.upper() == "WEBSTER":
                fair_shares[i] = round(quotas[i])
            elif method.upper() == "JEFFERSON":
                fair_shares[i] = math.floor(quotas[i])

        if sum(fair_shares) != seats:
            # Miss: tighten the step and probe just below the best known divisor.
            estimator = estimator / 10
            prev_divisor = divisor
            divisor = lowest_divisor - estimator
        else:
            # Hit: record the working divisor and keep probing downward.
            lowest_divisor = divisor
            divisor = prev_divisor - estimator

        if lowest_divisor == divisor:
            break
        counter += 1

    return math.ceil(lowest_divisor * 1000) / 1000
def estimate_highest_divisor(method, divisor, populations, seats):
    """
    Calculates the estimated highest possible divisor.

    :param method: The method used ("ADAM", "WEBSTER" or "JEFFERSON").
    :type method: str
    :param divisor: A working divisor in calculating fair shares.
    :type divisor: float
    :param populations: The populations for each state respectively.
    :type populations: [float]
    :param seats: The amount of seats to apportion.
    :type seats: int
    :return: An estimation of the highest possible divisor.
    """
    # The number of states to apportion to.  (Bug fix: this was
    # sum(populations), which mis-sized the work lists below and raised a
    # TypeError whenever the populations were floats.)
    states = len(populations)

    # Initialize lists for fair shares and quotas.
    quotas = [0] * states
    fair_shares = [0] * states

    # Keep track of the previous divisor calculated and highest of them.
    prev_divisor = 0
    highest_divisor = 0

    # Estimator to use in predicting divisors; shrinks by 10x on each miss.
    estimator = 1000000000

    counter = 0
    while counter < 1000:
        # Compute each state's quota and round it per the chosen method.
        for i, population in enumerate(populations):
            if divisor is None or population is None:
                return None
            quotas[i] = population / divisor
            if method.upper() == "ADAM":
                fair_shares[i] = math.ceil(quotas[i])
            elif method.upper() == "WEBSTER":
                fair_shares[i] = round(quotas[i])
            elif method.upper() == "JEFFERSON":
                fair_shares[i] = math.floor(quotas[i])

        if sum(fair_shares) != seats:
            # Miss: tighten the step and probe just above the best known divisor.
            estimator = estimator / 10
            prev_divisor = divisor
            divisor = highest_divisor + estimator
        else:
            # Hit: record the working divisor before probing again.
            # NOTE(review): the '-' here mirrors estimate_lowest_divisor and
            # looks like it could be a copy-paste artifact ('+' would probe
            # upward) -- preserved as-is; confirm intended search direction.
            highest_divisor = divisor
            divisor = prev_divisor - estimator

        if highest_divisor == divisor:
            break
        counter += 1

    return math.ceil(highest_divisor * 1000) / 1000
| 30.881356 | 72 | 0.611416 |
7b0bcb46e200df6f78d9fe78eb07f700564fadd3 | 4,084 | py | Python | csv_to_table.py | canary-for-cognition/multimodal-ml-framework | 379963e2815165b28a28c983d32dd17656fba9a9 | [
"MIT"
] | 1 | 2021-11-10T10:28:01.000Z | 2021-11-10T10:28:01.000Z | csv_to_table.py | canary-for-cognition/multimodal-ml-framework | 379963e2815165b28a28c983d32dd17656fba9a9 | [
"MIT"
] | null | null | null | csv_to_table.py | canary-for-cognition/multimodal-ml-framework | 379963e2815165b28a28c983d32dd17656fba9a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# import pylatex
from pylatex import Document, Section, Tabular, Math, Axis, Subsection
import pandas as pd
import sys
import os
main()
| 51.696203 | 131 | 0.578355 |
7b0d0466817dc17050d1085421ef9276feb2fb86 | 2,803 | py | Python | torch_audioset/vggish/model.py | Guillaume-oso/torch_audioset | e8852c53becef811784754a2de9c4617d8db2156 | [
"MIT"
] | 26 | 2020-03-25T21:19:33.000Z | 2022-02-01T15:14:29.000Z | torch_audioset/vggish/model.py | Guillaume-oso/torch_audioset | e8852c53becef811784754a2de9c4617d8db2156 | [
"MIT"
] | 7 | 2020-05-31T07:57:05.000Z | 2021-12-23T10:16:55.000Z | torch_audioset/vggish/model.py | Guillaume-oso/torch_audioset | e8852c53becef811784754a2de9c4617d8db2156 | [
"MIT"
] | 8 | 2020-10-27T16:22:55.000Z | 2022-03-28T22:48:07.000Z | import os.path as osp
import yaml
import torch.nn as nn
from torch import hub
__all__ = ['get_vggish', 'vggish_category_metadata']
model_urls = {
'vggish': "https://github.com/w-hc/vggish/releases/download/v0.1/vggish_orig.pth",
'vggish_with_classifier': "https://github.com/w-hc/vggish/releases/download/v0.1/vggish_with_classifier.pth"
}
def get_vggish(with_classifier=False, pretrained=True):
    """Build a VGGish model, optionally with the AudioSet classifier head.

    When ``pretrained`` is True the matching released weights are fetched
    through torch.hub and loaded before the model is returned.
    """
    if with_classifier:
        model, url = VGGishClassify(), model_urls['vggish_with_classifier']
    else:
        model, url = VGGish(), model_urls['vggish']
    if pretrained:
        model.load_state_dict(hub.load_state_dict_from_url(url, progress=True))
    return model
| 29.197917 | 112 | 0.576882 |
7b0d272861a3704f10e9a92801a2d879819c1a06 | 12,584 | py | Python | common/cuchemcommon/data/helper/chembldata.py | dorukozturk/cheminformatics | c0fa66dd4f4e6650d7286ae2be533c66b7a2b270 | [
"Apache-2.0"
] | null | null | null | common/cuchemcommon/data/helper/chembldata.py | dorukozturk/cheminformatics | c0fa66dd4f4e6650d7286ae2be533c66b7a2b270 | [
"Apache-2.0"
] | null | null | null | common/cuchemcommon/data/helper/chembldata.py | dorukozturk/cheminformatics | c0fa66dd4f4e6650d7286ae2be533c66b7a2b270 | [
"Apache-2.0"
] | null | null | null | import os
import warnings
import pandas
import sqlite3
import logging
from typing import List
from dask import delayed, dataframe
from contextlib import closing
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.context import Context
warnings.filterwarnings("ignore", message=r"deprecated", category=FutureWarning)
logger = logging.getLogger(__name__)
BATCH_SIZE = 100000
ADDITIONAL_FEILD = ['canonical_smiles', 'transformed_smiles']
IMP_PROPS = [
'alogp',
'aromatic_rings',
'full_mwt',
'psa',
'rtb']
IMP_PROPS_TYPE = [pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64')]
ADDITIONAL_FEILD_TYPE = [pandas.Series([], dtype='object'),
pandas.Series([], dtype='object')]
SQL_MOLECULAR_PROP = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
"""
# DEPRECATED. Please add code to DAO classes.
| 39.202492 | 101 | 0.565559 |
7b0dd834a233f033a4537593bd1c545e5c4ea02a | 769 | py | Python | tests/app/users/migrations/0001_initial.py | silverlogic/djangorestframework-timed-auth-token | 0884559c6b5e4021d7a8830ec5dd60f2799d0ee4 | [
"MIT"
] | 34 | 2015-05-22T00:02:49.000Z | 2021-12-29T11:42:31.000Z | tests/app/users/migrations/0001_initial.py | silverlogic/djangorestframework-timed-auth-token | 0884559c6b5e4021d7a8830ec5dd60f2799d0ee4 | [
"MIT"
] | 6 | 2015-05-22T00:04:50.000Z | 2021-06-10T17:49:38.000Z | tests/app/users/migrations/0001_initial.py | silverlogic/djangorestframework-timed-auth-token | 0884559c6b5e4021d7a8830ec5dd60f2799d0ee4 | [
"MIT"
] | 6 | 2015-05-25T17:44:50.000Z | 2020-12-05T14:48:53.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 29.576923 | 114 | 0.574772 |
7b0e27fa7adc3752fa6c840a8e64f5d20d45801c | 370 | py | Python | PyObjCTest/test_nsmachport.py | Khan/pyobjc-framework-Cocoa | f8b015ea2a72d8d78be6084fb12925c4785b8f1f | [
"MIT"
] | 132 | 2015-01-01T10:02:42.000Z | 2022-03-09T12:51:01.000Z | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsmachport.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 6 | 2015-01-06T08:23:19.000Z | 2019-03-14T12:22:06.000Z | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsmachport.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 27 | 2015-02-23T11:51:43.000Z | 2022-03-07T02:34:18.000Z | from PyObjCTools.TestSupport import *
import objc
import Foundation
if hasattr(Foundation, 'NSMachPort'):
if __name__ == '__main__':
main( )
| 23.125 | 47 | 0.632432 |
7b13d630c689e01a72a9bc979b93bb26fb000d70 | 7,125 | py | Python | harmony.py | cyrushadavi/home_automation | dcf1dcc688b5021a0c16e68e372e38a28d819f3d | [
"MIT"
] | null | null | null | harmony.py | cyrushadavi/home_automation | dcf1dcc688b5021a0c16e68e372e38a28d819f3d | [
"MIT"
] | null | null | null | harmony.py | cyrushadavi/home_automation | dcf1dcc688b5021a0c16e68e372e38a28d819f3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
"""Command line utility for querying the Logitech Harmony."""
import argparse
import logging
import json
import sys
import auth
import client as harmony_client
LOGGER = logging.getLogger(__name__)
def login_to_logitech(args):
    """Authenticate with Logitech and exchange the web token for a session.

    Args:
        args: argparse arguments needed to login (email, password,
            harmony_ip, harmony_port).

    Returns:
        Session token that can be used to log in to the Harmony device.
    """
    web_token = auth.login(args.email, args.password)
    if not web_token:
        sys.exit('Could not get token from Logitech server.')
    device_token = auth.swap_auth_token(args.harmony_ip, args.harmony_port,
                                        web_token)
    if not device_token:
        sys.exit('Could not swap login token for session token.')
    return device_token
def pprint(obj):
    """Print *obj* as sorted, 4-space-indented JSON."""
    rendered = json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    print(rendered)
def get_client(args):
    """Log in and return a connected Harmony Client instance."""
    session_token = login_to_logitech(args)
    return harmony_client.create_and_connect_client(
        args.harmony_ip, args.harmony_port, session_token)
def show_config(args):
    """Dump the Harmony's full configuration as pretty-printed JSON."""
    harmony = get_client(args)
    config = harmony.get_config()
    pprint(config)
    harmony.disconnect(send_close=True)
    return 0
def show_current_activity(args):
    """Print the config block of the activity the Harmony is currently in."""
    harmony = get_client(args)
    config = harmony.get_config()
    active_id = harmony.get_current_activity()
    # [0] lookup kept so a missing id still raises IndexError as before.
    matches = [entry for entry in config['activity'] if int(entry['id']) == active_id]
    pprint(matches[0])
    harmony.disconnect(send_close=True)
    return 0
def sync(args):
    """Trigger a configuration sync on the Harmony hub."""
    harmony = get_client(args)
    harmony.sync()
    harmony.disconnect(send_close=True)
    return 0
def turn_off(args):
    """Sends a 'turn off' command to the harmony, which is the activity
    '-1'."""
    # PowerOff is modelled as the pseudo-activity id '-1'; override the
    # requested activity and reuse the normal activity-switch path.
    args.activity = '-1'
    start_activity(args)
def start_activity(args):
    """Connects to the Harmony and switches to a different activity,
    specified as an id or label.

    Returns 0 on success, 1 if no matching activity is found.
    """
    client = get_client(args)
    config = client.get_config()
    # (Removed leftover Python-2 debug statement 'print args', which was also
    # a syntax error under Python 3.)
    activity_off = False
    activity_numeric = False
    activity_id = None
    activity_label = None
    try:
        # Numeric input: -1 (or 'turn off') means power everything down.
        activity_off = float(args.activity) == -1
        activity_id = int(float(args.activity))
        activity_numeric = True
    except ValueError:
        activity_off = args.activity.lower() == 'turn off'
        # Lowercase the label so the comparison below (which lowercases the
        # configured labels) is actually case-insensitive, matching the
        # behavior of send_command.
        activity_label = str(args.activity).lower()
    if activity_off:
        activity = [{'id': -1, 'label': 'Turn Off'}]
    else:
        activity = [x for x in config['activity']
                    if (activity_numeric and int(x['id']) == activity_id)
                    or x['label'].lower() == activity_label
                    ]
    if not activity:
        LOGGER.error('could not find activity: ' + args.activity)
        client.disconnect(send_close=True)
        return 1
    activity = activity[0]
    client.start_activity(int(activity['id']))
    LOGGER.info("started activity: '%s' of id: '%s'" % (activity['label'], activity['id']))
    client.disconnect(send_close=True)
    return 0
def send_command(args):
    """Send a single IR command to one device attached to the Harmony."""
    harmony = get_client(args)
    config = harmony.get_config()
    target = args.device if args.device_id is None else args.device_id
    target_number = None
    try:
        target_number = int(float(target))
    except ValueError:
        # Not numeric; we will match on the label only.
        pass
    candidates = [dev for dev in config['device']
                  if target.lower() == dev['label'].lower() or
                  ((target_number is not None) and target_number == int(dev['id']))]
    if not candidates:
        LOGGER.error('could not find device: ' + target)
        harmony.disconnect(send_close=True)
        return 1
    harmony.send_command(int(candidates[0]['id']), args.command)
    harmony.disconnect(send_close=True)
    return 0
def main():
    """Main method for the script.

    Builds the CLI: global connection/logging flags plus one subcommand per
    action; each subcommand stores its handler in args.func, whose return
    value becomes the process exit code.
    """
    parser = argparse.ArgumentParser(
        description='pyharmony utility script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required flags go here.
    required_flags = parser.add_argument_group('required arguments')
    required_flags.add_argument('--email', required=True, help=(
        'Logitech username in the form of an email address.'))
    required_flags.add_argument(
        '--password', required=True, help='Logitech password.')
    required_flags.add_argument(
        '--harmony_ip', required=True, help='IP Address of the Harmony device.')
    # Flags with defaults go here.
    parser.add_argument('--harmony_port', default=5222, type=int, help=(
        'Network port that the Harmony is listening on.'))
    # Map level names (DEBUG..CRITICAL) to their numeric values (10..50).
    loglevels = dict((logging.getLevelName(level), level)
                     for level in [10, 20, 30, 40, 50])
    parser.add_argument('--loglevel', default='INFO', choices=loglevels.keys(),
                        help='Logging level to print to the console.')
    # One subparser per CLI verb.
    subparsers = parser.add_subparsers()
    show_config_parser = subparsers.add_parser(
        'show_config', help='Print the Harmony device configuration.')
    show_config_parser.set_defaults(func=show_config)
    show_activity_parser = subparsers.add_parser(
        'show_current_activity', help='Print the current activity config.')
    show_activity_parser.set_defaults(func=show_current_activity)
    start_activity_parser = subparsers.add_parser(
        'start_activity', help='Switch to a different activity.')
    start_activity_parser.add_argument(
        'activity', help='Activity to switch to, id or label.')
    start_activity_parser.set_defaults(func=start_activity)
    sync_parser = subparsers.add_parser(
        'sync', help='Sync the harmony.')
    sync_parser.set_defaults(func=sync)
    turn_off_parser = subparsers.add_parser(
        'turn_off', help='Send a turn off command to the harmony.')
    turn_off_parser.set_defaults(func=turn_off)
    command_parser = subparsers.add_parser(
        'send_command', help='Send a simple command.')
    command_parser.add_argument('--command',
                                help='IR Command to send to the device.', required=True)
    # The target device may be given by id or by id/label, but not both.
    device_arg_group = command_parser.add_mutually_exclusive_group(required=True)
    device_arg_group.add_argument('--device_id',
                                  help='Specify the device id to which we will send the command.')
    device_arg_group.add_argument('--device',
                                  help='Specify the device id or label to which we will send the command.')
    command_parser.set_defaults(func=send_command)
    args = parser.parse_args()
    logging.basicConfig(
        level=loglevels[args.loglevel],
        format='%(levelname)s:\t%(name)s\t%(message)s')
    # Dispatch to the selected subcommand handler.
    sys.exit(args.func(args))
if __name__ == '__main__':
main()
| 29.442149 | 107 | 0.663719 |
7b15f666dd8b6c5e2030f1efa5c2aa16458ac78c | 14,567 | py | Python | workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py | sykang808/aws-well-architected-labs-kor | da021a9f7501088f871b08560673deac4488eef4 | [
"Apache-2.0"
] | null | null | null | workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py | sykang808/aws-well-architected-labs-kor | da021a9f7501088f871b08560673deac4488eef4 | [
"Apache-2.0"
] | null | null | null | workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py | sykang808/aws-well-architected-labs-kor | da021a9f7501088f871b08560673deac4488eef4 | [
"Apache-2.0"
] | null | null | null | #
# MIT No Attribution
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
from botocore.exceptions import ClientError
import os
import sys
import logging
import traceback
import boto3
import json
LOG_LEVELS = {'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10}
stackname = 'WebServersForResiliencyTesting'
AWS_REGION = 'us-east-2'
ARCH_TO_AMI_NAME_PATTERN = {
# Architecture: (pattern, owner)
"PV64": ("amzn2-ami-pv*.x86_64-ebs", "amazon"),
"HVM64": ("amzn2-ami-hvm-*-x86_64-gp2", "amazon"),
"HVMG2": ("amzn2-ami-graphics-hvm-*x86_64-ebs*", "679593333241")
}
if __name__ == "__main__":
    # Local test harness: fabricate the event payload and invoke the handler
    # directly.  NOTE(review): init_logging, setup_local_logging and
    # lambda_handler are not visible in this excerpt -- confirm they are
    # defined earlier in this file.
    logger = init_logging()
    event = {
        'vpc': {
            'stackname': 'ResiliencyVPC',
            'status': 'CREATE_COMPLETE'
        },
        'rds': {
            'stackname': 'MySQLforResiliencyTesting',
            'status': 'CREATE_COMPLETE'
        },
        'log_level': 'DEBUG',
        'region_name': 'ap-northeast-2',
        'cfn_region': 'us-east-2',
        'cfn_bucket': 'aws-well-architected-labs-ohio',
        'folder': 'Reliability/',
        'boot_bucket': 'aws-well-architected-labs-ohio',
        'boot_prefix': 'Reliability/',
        'boot_object': 'bootstrapARC327.sh',
        'websiteimage': 'https://s3.us-east-2.amazonaws.com/arc327-well-architected-for-reliability/Cirque_of_the_Towers.jpg',
        'workshop': 'LondonSummit'
    }
    # Environment overrides the event's log level if already set.
    os.environ['log_level'] = os.environ.get('log_level', event['log_level'])
    logger = setup_local_logging(logger, os.environ['log_level'])
    # Add default level of debug for local execution
    lambda_handler(event, 0)
| 39.800546 | 139 | 0.674126 |
7b16d187420b13711f7fff210fdd319f14807224 | 483 | py | Python | URI/1024.py | leilaapsilva/BabySteps | 32b1e6439fa3be49c93a3cae0b4fbd0f03a713be | [
"MIT"
] | 37 | 2020-10-01T03:50:42.000Z | 2021-11-23T00:49:51.000Z | URI/1024.py | leilaapsilva/BabySteps | 32b1e6439fa3be49c93a3cae0b4fbd0f03a713be | [
"MIT"
] | 27 | 2020-10-03T23:16:13.000Z | 2021-11-19T19:53:01.000Z | URI/1024.py | leilaapsilva/BabySteps | 32b1e6439fa3be49c93a3cae0b4fbd0f03a713be | [
"MIT"
] | 97 | 2020-10-01T11:39:01.000Z | 2021-11-01T00:30:53.000Z | alpha = "abcdefghijklmnopqrstuvwxyz"
n = int(raw_input())
for i in xrange(n):
word = raw_input()
aux_word = ""
first_part = ""
second_part = ""
for j in xrange(len(word)-1, -1, -1):
if(word[j].lower() in alpha):
aux_word += chr(ord(word[j]) + 3)
else:
aux_word += word[j]
middle = (len(word)/2)
first_part = aux_word[0:middle]
for k in xrange((len(aux_word)/2), len(aux_word)):
second_part += chr(ord(aux_word[k]) -1)
print first_part + second_part | 21 | 51 | 0.6294 |
7b17163e98fca69e6d9d2a2ecd44f5b5e78cfd5c | 6,095 | py | Python | Coursework 2/nn_preprocess.py | martinferianc/Pattern-Recognition-EIE4 | 412d437582b236dadd81c0621935f6b3bd5dbad5 | [
"MIT"
] | 1 | 2019-08-20T11:17:56.000Z | 2019-08-20T11:17:56.000Z | Coursework 2/nn_preprocess.py | martinferianc/Pattern-Recognition-EIE4 | 412d437582b236dadd81c0621935f6b3bd5dbad5 | [
"MIT"
] | null | null | null | Coursework 2/nn_preprocess.py | martinferianc/Pattern-Recognition-EIE4 | 412d437582b236dadd81c0621935f6b3bd5dbad5 | [
"MIT"
] | null | null | null | import numpy as np
# For file manipulation and locating
import os
# For the progress bar
from tqdm import tqdm
# To create a deep copy of the data
import copy
# To load the pre-processed and split data
from pre_process import load_data as ld
# For normalization of the samples
from sklearn.preprocessing import normalize
# We define some constant that we reuse
PROCESSED_DIR = "data/processed/"
def save_data(data, file_path, name):
    """Persist *data* to ``<file_path><name>.npy``.

    Parameters
    ----------
    data: numpy matrix
        Data matrix with features.
    file_path: str
        Directory prefix for the output file (expected to end with a
        path separator, e.g. ``PROCESSED_DIR``).
    name: str
        Base name of the file, without the ``.npy`` extension.
    """
    target = "{}{}.npy".format(file_path, name)
    np.save(target, data)
def preprocess(X, Y, size = 100000,lower_bound=0, upper_bound = 7368,samples = 10, same_class=0.4, different = 0.5, penalty = 10, same_class_penalty=1):
    """
    Build a dataset of input pairs with artificial target distances.

    For each of ``size / samples`` anchor rows drawn without repetition from
    ``X[lower_bound:upper_bound]``, ``samples`` pairs are emitted:
    a ``different`` fraction pairs the anchor with rows of a different label
    (target ``penalty``), a ``same_class`` fraction pairs it with other rows
    of the same label (target ``same_class_penalty``), and the remainder
    pairs the anchor with itself (target 0).

    Parameters
    ----------
    X: numpy array
        Feature matrix; rows are L2-normalized before pairing.
    Y: numpy array
        Labels corresponding to the rows of ``X``.
    size: int
        Total number of pairs to generate.
    lower_bound, upper_bound: int
        Index range of rows eligible for sampling.
    samples: int
        Number of pairs generated per anchor row.
    same_class, different: float
        Fractions of ``samples`` used for same-label / different-label pairs.
    penalty, same_class_penalty: float
        Artificial distances assigned to different-label / same-label pairs.

    Returns
    -------
    list
        ``[X_selected, Y_selected, values]`` — first pair elements, second
        pair elements, and the artificial distances, jointly shuffled.
    """
    X = normalize(X, axis=1)
    N,F = X.shape
    X_selected = []
    Y_selected = []
    values = []
    # Per-anchor quotas for same-class (C) and different-class (D) pairs.
    C = int(samples*same_class)
    D = int(samples*different)
    selected_i = []
    # NOTE(review): the rejection loops below assume the index range holds
    # enough distinct anchors and enough rows of matching / differing labels;
    # otherwise they would spin forever — confirm for new datasets.
    for i in tqdm(range(int(size/samples))):
        # Randomly select a sample but do not repeat it with respect ot previous samples
        random_i = np.random.randint(lower_bound,upper_bound)
        while random_i in selected_i:
            random_i = np.random.randint(lower_bound,upper_bound)
        selected_i.append(random_i)
        C_counter = 0
        D_counter = 0
        offset = 0  # total pairs emitted so far for this anchor
        # Add samples which correspond to different label than the original image
        selected_j = []
        while D_counter<D:
            random_j = np.random.randint(lower_bound,upper_bound)
            while random_j in selected_j:
                random_j = np.random.randint(lower_bound,upper_bound)
            if Y[random_i] != Y[random_j]:
                D_counter+=1
                offset+=1
                X_selected.append(copy.deepcopy(X[random_i]))
                Y_selected.append(copy.deepcopy(X[random_j]))
                values.append(penalty)
                selected_j.append(random_j)
        # Add samples which correspond to the same class
        selected_j = []
        while C_counter<C:
            # NOTE(review): low/high are computed but never used below
            # (random_j is drawn from [lower_bound, upper_bound)) — dead code?
            low = 0
            high = N
            if random_i-10>lower_bound:
                low = random_i-10
            if random_i+10<upper_bound:
                high = random_i+10
            random_j = np.random.randint(lower_bound,upper_bound)
            while random_j in selected_j:
                random_j = np.random.randint(lower_bound,upper_bound)
            if Y[random_i] == Y[random_j] and random_i!=random_j:
                C_counter+=1
                offset +=1
                X_selected.append(copy.deepcopy(X[random_i]))
                Y_selected.append(copy.deepcopy(X[random_j]))
                values.append(same_class_penalty)
                selected_j.append(random_j)
        # Fill in the rest with identities
        while offset < samples:
            X_selected.append(copy.deepcopy(X[random_i]))
            Y_selected.append(copy.deepcopy(X[random_i]))
            offset+=1
            values.append(0)
    # Shuffle all three arrays with one shared permutation of [0, size).
    indeces = np.random.choice(size, size=size, replace=False)
    X_selected = np.array(X_selected)
    Y_selected = np.array(Y_selected)
    values = np.array(values)
    return [X_selected[indeces], Y_selected[indeces], values[indeces]]
def load_data(retrain=False):
    """
    Load the cached pair data, or regenerate it via preprocess().

    Parameters
    ----------
    retrain: bool
        When True, build fresh training/validation pairs from the raw
        split data and overwrite the ``.npy`` caches in PROCESSED_DIR;
        when False, load the previously cached arrays from disk.

    Returns
    -------
    list
        ``[X_train, Y_train, values_train,
        X_validation, Y_validation, values_validation]``
    """
    # `ld` is pre_process.load_data; element 0 is the training split
    # as a [features, labels] pair.
    all_data = ld(False)
    training_data = all_data[0]
    Y = training_data[1]
    X = training_data[0]
    if retrain is True:
        print("Generating new data...")
        # Rows [0, 6379) feed the training pairs; rows from 6380 up feed
        # validation (upper bound defaults to 7368 inside preprocess).
        X_train, Y_train, values_train = preprocess(X,Y, 40000, 0, 6379,samples = 10, same_class=0.4, different = 0.5, penalty = 1,same_class_penalty=0)
        X_validation, Y_validation, values_validation = preprocess(X,Y, 7500, 6380,samples = 10, same_class=0.2, different = 0.7, penalty = 1, same_class_penalty=0)
        save_data(X_train,PROCESSED_DIR,"training_nn_X")
        save_data(Y_train,PROCESSED_DIR,"training_nn_Y")
        save_data(values_train,PROCESSED_DIR,"training_nn_values")
        save_data(X_validation,PROCESSED_DIR,"validation_nn_X")
        save_data(Y_validation,PROCESSED_DIR,"validation_nn_Y")
        save_data(values_validation,PROCESSED_DIR,"validation_nn_values")
        return [X_train, Y_train, values_train, X_validation, Y_validation, values_validation]
    else:
        print("Loading data...")
        data = []
        data.append(np.load(PROCESSED_DIR + "training_nn_X.npy"))
        data.append(np.load(PROCESSED_DIR + "training_nn_Y.npy"))
        data.append(np.load(PROCESSED_DIR + "training_nn_values.npy"))
        data.append(np.load(PROCESSED_DIR + "validation_nn_X.npy"))
        data.append(np.load(PROCESSED_DIR + "validation_nn_Y.npy"))
        data.append(np.load(PROCESSED_DIR + "validation_nn_values.npy"))
        return data
if __name__ == '__main__':
    # Regenerate and cache the pair datasets when executed as a script.
    load_data(retrain=True)
| 33.674033 | 164 | 0.642986 |
7b180f7965af3a7127ae86b77bf7384badafe436 | 776 | py | Python | src/main.py | M10han/image-scores | 509e2e9f9d3a484631a97a2e025849c266f71c43 | [
"MIT"
] | null | null | null | src/main.py | M10han/image-scores | 509e2e9f9d3a484631a97a2e025849c266f71c43 | [
"MIT"
] | 1 | 2021-06-08T21:41:19.000Z | 2021-06-08T21:41:19.000Z | src/main.py | M10han/image-scores | 509e2e9f9d3a484631a97a2e025849c266f71c43 | [
"MIT"
] | null | null | null | import pandas as pd
import time
from image_matcher import read_image, bjorn_score
if __name__ == "__main__":
main()
| 26.758621 | 58 | 0.643041 |
7b1892266415333934744e874665f21d627beb7f | 2,006 | py | Python | build/lib.linux-x86_64-2.7/biograder/Encryptor.py | PayneLab/GenericDataAPI | 9469328c4f845fbf8d97b5d80ad2077c9f927022 | [
"MIT"
] | 2 | 2021-04-25T18:36:29.000Z | 2021-05-14T15:34:59.000Z | build/lib.linux-x86_64-2.7/biograder/Encryptor.py | PayneLab/GenericDataAPI | 9469328c4f845fbf8d97b5d80ad2077c9f927022 | [
"MIT"
] | null | null | null | build/lib.linux-x86_64-2.7/biograder/Encryptor.py | PayneLab/GenericDataAPI | 9469328c4f845fbf8d97b5d80ad2077c9f927022 | [
"MIT"
] | 2 | 2020-11-23T02:09:57.000Z | 2021-08-13T21:57:03.000Z | from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import serialization
| 31.84127 | 97 | 0.602193 |
7b190c0f4573cd290b14012b9fc7b11615f31516 | 218 | py | Python | elif_bayindir/phase_1/python_basic_1/day_6/q7.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | elif_bayindir/phase_1/python_basic_1/day_6/q7.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | elif_bayindir/phase_1/python_basic_1/day_6/q7.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | # Question 7
# Find out the number of CPUs using
import os

# os.cpu_count() reports the number of logical CPUs in the system,
# or None if it cannot be determined.
print("Number of CPUs using:", os.cpu_count())
# Alternative,
""" import multiprocessing
print("Number of CPUs using:", multiprocessing.cpu_count()) """
| 18.166667 | 63 | 0.711009 |
7b1bfc88d4da28ede06e1a7e0dc3ba09c6ec9cb9 | 3,081 | py | Python | openstates/openstates-master/openstates/ia/__init__.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/ia/__init__.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/ia/__init__.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | import re
import datetime
import lxml.html
import requests
from billy.utils.fulltext import text_after_line_numbers
from .bills import IABillScraper
from .legislators import IALegislatorScraper
from .events import IAEventScraper
from .votes import IAVoteScraper
# Silencing unverified HTTPS request warnings.
requests.packages.urllib3.disable_warnings()
# Generous scraper timeout (seconds).
settings = {'SCRAPELIB_TIMEOUT': 240}

# Billy metadata describing the Iowa General Assembly: chambers, legislative
# terms/sessions, and bookkeeping consumed by the scraper framework.
metadata = {
    'name': 'Iowa',
    'abbreviation': 'ia',
    'capitol_timezone': 'America/Chicago',
    'legislature_name': 'Iowa General Assembly',
    'legislature_url': 'https://www.legis.iowa.gov/',
    'chambers': {
        'upper': {'name': 'Senate', 'title': 'Senator'},
        'lower': {'name': 'House', 'title': 'Representative'},
    },
    'terms': [
        {
            'name': '2011-2012',
            'start_year': 2011,
            'end_year': 2012,
            'sessions': ['2011-2012'],
        },
        {
            'name': '2013-2014',
            'start_year': 2013,
            'end_year': 2014,
            'sessions': ['2013-2014'],
        },
        {
            'name': '2015-2016',
            'start_year': 2015,
            'end_year': 2016,
            'sessions': ['2015-2016'],
        },
    ],
    'session_details': {
        '2011-2012': {
            'display_name': '2011-2012 Regular Session',
            '_scraped_name': 'General Assembly: 84',
            'number': '84',
            'start_date': datetime.date(2011, 1, 10),
            'end_date': datetime.date(2013, 1, 13),
        },
        '2013-2014': {
            'display_name': '2013-2014 Regular Session',
            '_scraped_name': 'General Assembly: 85',
            'number': '85',
        },
        '2015-2016': {
            'display_name': '2015-2016 Regular Session',
            '_scraped_name': 'General Assembly: 86',
            'number': '86',
        },
    },
    'feature_flags': ['events', 'influenceexplorer'],
    # Session names seen on the site but deliberately not scraped
    # (the repeated entries match the original list exactly).
    '_ignored_scraped_sessions': [
        'Legislative Assembly: 86',
        'General Assembly: 83',
        'General Assembly: 82',
        'General Assembly: 81',
        'General Assembly: 80',
        'General Assembly: 79',
        'General Assembly: 79',
        'General Assembly: 78',
        'General Assembly: 78',
        'General Assembly: 77',
        'General Assembly: 77',
        'General Assembly: 76',
    ],
}
| 29.066038 | 72 | 0.563778 |
7b1e18b2a4656893e78e78b318983823f4f03309 | 2,965 | py | Python | dp_excel/ExcelFile.py | DmitryPaschenko/python_excel_writer | d23acbe44e3e7e786fd8fd8deb1f47263326199f | [
"MIT"
] | null | null | null | dp_excel/ExcelFile.py | DmitryPaschenko/python_excel_writer | d23acbe44e3e7e786fd8fd8deb1f47263326199f | [
"MIT"
] | null | null | null | dp_excel/ExcelFile.py | DmitryPaschenko/python_excel_writer | d23acbe44e3e7e786fd8fd8deb1f47263326199f | [
"MIT"
] | null | null | null | from openpyxl import Workbook
from openpyxl.utils import get_column_letter
from openpyxl.writer.excel import save_virtual_workbook
| 36.158537 | 158 | 0.651602 |
7b1ea6dc53dbed446cf8e4fe80ef8e9dd14dbdfd | 435 | py | Python | test/test_flow.py | williford/vipy | d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d | [
"MIT"
] | 13 | 2020-07-23T12:15:24.000Z | 2022-03-18T13:58:31.000Z | test/test_flow.py | williford/vipy | d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d | [
"MIT"
] | 2 | 2020-02-26T00:58:40.000Z | 2021-04-26T12:34:41.000Z | test/test_flow.py | williford/vipy | d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d | [
"MIT"
] | 2 | 2020-05-11T15:31:06.000Z | 2021-09-16T14:01:33.000Z | import vipy
from vipy.flow import Flow
import numpy as np
if __name__ == "__main__":
test_flow()
| 25.588235 | 87 | 0.65977 |
7b204556097cfdfd3ff88e8d7bc8bf1337b3e12c | 660 | py | Python | server/main.py | DarthBenro008/gh-release-paniker | 757845b1eebef9d2219c88706fd4277f4261391f | [
"MIT"
] | 5 | 2021-12-08T06:37:33.000Z | 2021-12-20T17:17:18.000Z | server/main.py | DarthBenro008/gh-release-paniker | 757845b1eebef9d2219c88706fd4277f4261391f | [
"MIT"
] | null | null | null | server/main.py | DarthBenro008/gh-release-paniker | 757845b1eebef9d2219c88706fd4277f4261391f | [
"MIT"
] | null | null | null | from typing import Optional
from fastapi import FastAPI
app = FastAPI()
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)  # address pins by Broadcom (BCM) number, not board position
GPIO.setwarnings(False)  # silence "channel already in use" warnings on re-runs
# BCM pin assignments for the attached peripherals.
LED=21
BUZZER=23
GPIO.setup(LED,GPIO.OUT)  # drive the LED pin as a digital output
# NOTE(review): BUZZER is never configured with GPIO.setup in the visible
# code — confirm against the rest of the (truncated) file.
| 16.5 | 33 | 0.672727 |
7b20674499d7148c6a6ca240f5128fad607757fd | 8,656 | py | Python | virtual/lib/python3.10/site-packages/bootstrap_py/tests/test_package.py | alex-mu/Moringa-blog | 430ab9c1f43f2f0066369433ac3f60c41a51a01c | [
"MIT"
] | null | null | null | virtual/lib/python3.10/site-packages/bootstrap_py/tests/test_package.py | alex-mu/Moringa-blog | 430ab9c1f43f2f0066369433ac3f60c41a51a01c | [
"MIT"
] | 7 | 2021-03-30T14:10:56.000Z | 2022-03-12T00:43:13.000Z | virtual/lib/python3.6/site-packages/bootstrap_py/tests/test_package.py | sarahsindet/pitch | c7a4256e19c9a250b6d88d085699a34f508eb86b | [
"Unlicense",
"MIT"
] | 1 | 2021-08-19T06:07:23.000Z | 2021-08-19T06:07:23.000Z | # -*- coding: utf-8 -*-
"""bootstrap_py.tests.test_package."""
import unittest
import os
import shutil
import tempfile
from glob import glob
from datetime import datetime
from mock import patch
from bootstrap_py import package
from bootstrap_py.tests.stub import stub_request_metadata
# pylint: disable=too-few-public-methods
| 42.019417 | 79 | 0.586992 |
7b2072a69cb5c6d86996ccfc0e3130c0fc1d1caa | 383 | py | Python | news_bl/main/migrations/0005_alter_article_urltoimage.py | noddy09/news_search | 7bee6a3aeb6c8a5e9e01109635fbd53f5d808722 | [
"MIT"
] | null | null | null | news_bl/main/migrations/0005_alter_article_urltoimage.py | noddy09/news_search | 7bee6a3aeb6c8a5e9e01109635fbd53f5d808722 | [
"MIT"
] | null | null | null | news_bl/main/migrations/0005_alter_article_urltoimage.py | noddy09/news_search | 7bee6a3aeb6c8a5e9e01109635fbd53f5d808722 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-30 13:48
from django.db import migrations, models
| 20.157895 | 47 | 0.5953 |
7b20cd11ee3f48070fe24a5a912f30b91ada5d46 | 1,175 | py | Python | utils/migrate_cmds_idx_32bit.py | jzuhone/kadi | de4885327d256e156cfe42b2b1700775f5b4d6cf | [
"BSD-3-Clause"
] | 1 | 2015-07-30T18:33:14.000Z | 2015-07-30T18:33:14.000Z | utils/migrate_cmds_idx_32bit.py | jzuhone/kadi | de4885327d256e156cfe42b2b1700775f5b4d6cf | [
"BSD-3-Clause"
] | 104 | 2015-01-20T18:44:36.000Z | 2022-03-29T18:51:55.000Z | utils/migrate_cmds_idx_32bit.py | jzuhone/kadi | de4885327d256e156cfe42b2b1700775f5b4d6cf | [
"BSD-3-Clause"
] | 2 | 2018-08-23T02:36:08.000Z | 2020-03-13T19:24:36.000Z | from pathlib import Path
import numpy as np
import tables
# Use snapshot from aug08 before the last update that broke things.
with tables.open_file('cmds_aug08.h5') as h5:
    cmds = h5.root.data[:]  # read the whole commands table into memory
print(cmds.dtype)
# [('idx', '<u2'), ('date', 'S21'), ('type', 'S12'), ('tlmsid', 'S10'),
#  ('scs', 'u1'), ('step', '<u2'), ('timeline_id', '<u4'), ('vcdu', '<i4')]
# Same layout as above, but with 'idx' widened from uint16 to int32.
new_dtype = [('idx', '<i4'), ('date', 'S21'), ('type', 'S12'), ('tlmsid', 'S10'),
             ('scs', 'u1'), ('step', '<u2'), ('timeline_id', '<u4'), ('vcdu', '<i4')]
new_cmds = cmds.astype(new_dtype)
# The widening conversion must preserve every value in every column.
for name in cmds.dtype.names:
    assert np.all(cmds[name] == new_cmds[name])
cmds_h5 = Path('cmds.h5')
if cmds_h5.exists():
    cmds_h5.unlink()  # start from a fresh output file
with tables.open_file('cmds.h5', mode='a') as h5:
    h5.create_table(h5.root, 'data', new_cmds, "cmds", expectedrows=2e6)
# Make sure the new file is really the same except the dtype
with tables.open_file('cmds.h5') as h5:
    new_cmds = h5.root.data[:]
for name in cmds.dtype.names:
    assert np.all(cmds[name] == new_cmds[name])
    if name != 'idx':
        assert cmds[name].dtype == new_cmds[name].dtype
assert new_cmds['idx'].dtype.str == '<i4'  # only 'idx' changed: now little-endian int32
| 31.756757 | 85 | 0.613617 |
7b21a08900385c33387348bb5cf7b32f2eca5c0f | 579 | py | Python | 1_estrutura_sequencial/18_velocidade_download.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
] | null | null | null | 1_estrutura_sequencial/18_velocidade_download.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
] | null | null | null | 1_estrutura_sequencial/18_velocidade_download.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
] | null | null | null | """
18. Faa um programa que pea o tamanho de um arquivo para download (em MB) e
a velocidade de um link de Internet (em Mbps), calcule e informe o tempo
aproximado de download do arquivo usando este link (em minutos).
"""
# Read the file size (MB) and the link speed (Mbps) from the user.
tamanho_mb = float(input('Informe o tamanho de um arquivo para download (em MB): '))
velocidade_mbps = float(input('Informe a velocidade do link de Internet (em Mbps): '))

# Time = size / speed (the exercise treats MB and Mbps as directly
# comparable units), converted from seconds to minutes.
tempo_minutos = (tamanho_mb / velocidade_mbps) / 60
print('O tempo aproximado para download do arquivo de %d minuto(s).' % tempo_minutos)
| 38.6 | 91 | 0.753022 |
7b2304794deb520b2f5f87d0e37dcca35db22896 | 4,802 | py | Python | src/rte_pac/train_pyramid.py | UKPLab/conll2019-snopes-experiments | 102f4a05cfba781036bd3a7b06022246e53765ad | [
"Apache-2.0"
] | 5 | 2019-11-08T09:17:07.000Z | 2022-01-25T19:37:06.000Z | src/rte_pac/train_pyramid.py | UKPLab/conll2019-snopes-experiments | 102f4a05cfba781036bd3a7b06022246e53765ad | [
"Apache-2.0"
] | 18 | 2020-01-28T22:17:34.000Z | 2022-03-11T23:57:22.000Z | src/rte_pac/train_pyramid.py | UKPLab/conll2019-snopes-experiments | 102f4a05cfba781036bd3a7b06022246e53765ad | [
"Apache-2.0"
] | 1 | 2021-03-08T12:02:24.000Z | 2021-03-08T12:02:24.000Z | import argparse
import pickle
import os
import json
from sklearn.metrics import confusion_matrix
from utils.data_reader import embed_data_sets_with_glove, embed_data_set_given_vocab, prediction_2_label
from utils.text_processing import vocab_map
from common.util.log_helper import LogHelper
from deep_models.MatchPyramid import MatchPyramid
if __name__ == '__main__':
    # Command-line entry point: trains or evaluates a MatchPyramid RTE model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', help='\'train\' or \'test\'', required=True)
    parser.add_argument('--train', help='/path/to/training/set')
    parser.add_argument('--valid', help='/path/to/validation/set')
    parser.add_argument('--test', help='/path/to/test/set')
    parser.add_argument('--model', help='/path/to/model/file', required=True)
    parser.add_argument(
        '--save-data', help='/path/to/save/data', default="data/rte/train/")
    parser.add_argument('--load-data', help='/path/to/load/data/file')
    parser.add_argument('--db', help='/path/to/data/base', required=True)
    parser.add_argument(
        '--max-sent', type=int, help='Maximal number of sentences per claim', default=5)
    parser.add_argument('--embed', help='/path/to/embedding')
    parser.add_argument(
        '--save-result', help='/path/to/save/result', default="data/rte/result/")
    args = parser.parse_args()
    LogHelper.setup()
    logger = LogHelper.get_logger(args.mode)
    if args.mode == 'train':
        assert args.train is not None or args.load_data is not None, "--train training set or --load-data should be provided in train mode"
        assert args.embed is not None, "--embed should be provided in train mode"
        # training mode
        if args.load_data:
            # load pre-processed training data
            with open(args.load_data, "rb") as file:
                param = pickle.load(file)
        else:
            # process training JSONL file
            paths = [args.train, args.valid]
            dataset_list, vocab, embeddings, b_max_sent_num, b_max_sent_size = embed_data_sets_with_glove(
                paths, args.db, args.embed, threshold_b_sent_num=args.max_sent)
            vocab = vocab_map(vocab)
            # `param` bundles everything needed to rebuild the model later.
            param = {
                'dataset_list': dataset_list,
                'vocab': vocab,
                'embeddings': embeddings,
                'max_sent_size': b_max_sent_size,
                'max_sent': args.max_sent
            }
            # save processed training data
            os.makedirs(args.save_data, exist_ok=True)
            train_data_path = os.path.join(
                args.save_data, "train.{}.s{}.p".format("matchpyramid", str(args.max_sent)))
            with open(train_data_path, "wb") as file:
                pickle.dump(param, file, protocol=pickle.HIGHEST_PROTOCOL)
        # NOTE(review): _instantiate_model is defined elsewhere in this
        # module (not visible here) — it builds a MatchPyramid from `param`.
        pyramid = _instantiate_model(param)
        # dataset_list[0] is the training split, dataset_list[1] validation.
        pyramid.fit(param['dataset_list'][0]['data'], param['dataset_list'][0]['label'],
                    param['dataset_list'][1]['data'], param['dataset_list'][1]['label'])
        pyramid.save(args.model)
    else:
        # testing mode
        assert args.load_data is not None, "--load_data should be provided in test mode"
        assert args.test is not None, "--test test set should be provided in test mode"
        with open(args.load_data, "rb") as file:
            param = pickle.load(file)
        pyramid = _instantiate_model(param)
        pyramid.restore_model(args.model)
        # Re-embed the test set with the vocabulary/thresholds saved at train time.
        data_set = embed_data_set_given_vocab(args.test, args.db, param['vocab'], threshold_b_sent_num=param['max_sent'],
                                              threshold_b_sent_size=param['max_sent_size'], threshold_h_sent_size=param['max_sent_size'])
        os.makedirs(args.save_result, exist_ok=True)
        test_result_path = os.path.join(
            args.save_result, "predicted.pyramid.s{}.jsonl".format(param['max_sent']))
        # Write one JSON object per prediction (JSONL).
        with open(test_result_path, "w") as result_file:
            predictions = pyramid.predict(data_set['data'])
            for i, prediction in enumerate(predictions):
                data = {'predicted': prediction_2_label(prediction)}
                if 'label' in data_set:
                    data['label'] = prediction_2_label(data_set['label'][i])
                result_file.write(json.dumps(data) + "\n")
            if 'label' in data_set:
                logger.info("Confusion Matrix:")
                logger.info(confusion_matrix(data_set['label'], predictions))
| 52.195652 | 139 | 0.641399 |
7b2354c08ba6d3f70427aa659e1ba9d3a3e03c13 | 854 | py | Python | annotation/helpers/helpers/extract_noise.py | jim-schwoebel/allie | d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | [
"Apache-2.0"
] | 87 | 2020-08-07T09:05:11.000Z | 2022-01-24T00:48:22.000Z | annotation/helpers/helpers/extract_noise.py | jim-schwoebel/allie | d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | [
"Apache-2.0"
] | 87 | 2020-08-07T19:12:10.000Z | 2022-02-08T14:46:34.000Z | annotation/helpers/helpers/extract_noise.py | jim-schwoebel/allie | d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | [
"Apache-2.0"
] | 25 | 2020-08-07T20:03:08.000Z | 2022-03-16T07:33:25.000Z | import shutil, os, random
from pydub import AudioSegment
try:
os.mkdir('noise')
except:
shutil.rmtree('noise')
os.mkdir('noise')
listdir=os.listdir()
mp3files=list()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
mp3files.append(listdir[i])
random.shuffle(mp3files)
for i in range(len(mp3files)):
extract_noise(mp3files[i],300)
if i == 100:
break
os.chdir('noise')
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
os.system('play %s'%(listdir[i]))
remove=input('should remove? type y to remove')
if remove=='y':
os.remove(listdir[i])
| 27.548387 | 108 | 0.688525 |
7b248b5ee36bb65d830c7b56e66b0b390aa45baa | 1,030 | py | Python | ARMODServers/Apps/Apiv2/urls.py | Phantomxm2021/ARMOD-Dashboard | 383cf0a5e72dc5a2651f43e693f06773d5b88bbd | [
"Apache-2.0"
] | 1 | 2021-11-04T09:03:27.000Z | 2021-11-04T09:03:27.000Z | ARMODServers/Apps/Apiv2/urls.py | Phantomxm2021/ARMOD-Dashboard | 383cf0a5e72dc5a2651f43e693f06773d5b88bbd | [
"Apache-2.0"
] | null | null | null | ARMODServers/Apps/Apiv2/urls.py | Phantomxm2021/ARMOD-Dashboard | 383cf0a5e72dc5a2651f43e693f06773d5b88bbd | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from Apps.Apiv2.views import GetARResourcesView, GetARExperienceDetailView
from Apps.Apiv2.views import GetTagListView,GetARExperienceRecommendList,GetARExperiencePublicListView,GetARExperiencesView
from Apps.Apiv2.views import GetARexperienceByTagsListView
app_name = 'Apps.Users'
urlpatterns = [
url(r'^getarresources$', GetARResourcesView.as_view(), name='getarresources'),
url(r'^getarexperience$', GetARExperienceDetailView.as_view(), name='getarexperience'),
url(r'^getarexperiencelist$', GetARExperiencesView.as_view(), name='getarexperience'),
url(r'^gettaglist$', GetTagListView.as_view(), name='getshowcasetags'),
url(r'^getrecommendslist$', GetARExperienceRecommendList.as_view(), name='getshowcaserecommends'),
url(r'^getarexperiencepubliclist$', GetARExperiencePublicListView.as_view(), name='getarexperiencepubliclist'),
url(r'^getarexperiencebytagslist$', GetARexperienceByTagsListView.as_view(), name='getarexperiencebytagslist'),
# api/v2/
] | 60.588235 | 123 | 0.794175 |
7b24aa6646e92566319ce68092ddf4db0af43da1 | 2,600 | py | Python | make.py | loicseguin/astronomie | b489d615adb136991ff3fc82ca06c4f6791ca8c6 | [
"BSD-2-Clause"
] | null | null | null | make.py | loicseguin/astronomie | b489d615adb136991ff3fc82ca06c4f6791ca8c6 | [
"BSD-2-Clause"
] | 7 | 2020-01-19T21:27:07.000Z | 2020-01-19T21:28:09.000Z | make.py | loicseguin/astronomie | b489d615adb136991ff3fc82ca06c4f6791ca8c6 | [
"BSD-2-Clause"
] | null | null | null | """Construit le site Explorer et comprendre l'Univers, incluant les diapositives
et le livre. Le logiciel Pandoc est utilis pour obtenir des prsentations
dans diffrents formats.
On peut construire tous les fichiers html avec la commande
$ python make.py
"""
import subprocess
import os
import sys
# Dossiers de prsentation
DIAPOS_DIRS = [os.path.join('diapos', d) for d in os.listdir('diapos')
if d != 'reveal.js']
def run(call_str):
"""Excute la chane de caractre sur la ligne de commande."""
try:
subprocess.check_call(call_str.split())
print("complet!")
except subprocess.CalledProcessError as e:
print(call_str, end='... ')
print("erreur, la compilation a chou")
def revealjs(in_fname, out_fname):
"""Cre une prsentation avec la librairie javascript Reveal.js."""
call_str = "pandoc -t revealjs " \
"-V revealjs-url=../reveal.js -s " \
"--slide-level=1 " \
"--mathjax {} -o {}".format(in_fname, out_fname)
run(call_str)
def diapos():
"""Construits les fichiers HTML des diapositives."""
cwd = os.getcwd()
for folder in DIAPOS_DIRS:
try:
os.chdir(folder)
except (FileNotFoundError, NotADirectoryError):
os.chdir(cwd)
continue
# Dterminer le nom du fichier source.
for fname in os.listdir():
if fname.endswith(".md"):
break
else:
os.chdir(cwd)
continue
in_fname = fname
out_fname = "{}.html".format(os.path.splitext(os.path.basename(fname))[0])
print("{}: ".format(folder), end='')
revealjs(in_fname, out_fname)
os.chdir(cwd)
def livre():
"""Construit les fichiers HTML du livre."""
for fname in os.listdir('livre'):
if not fname.endswith('.md'):
continue
in_fname = os.path.join('livre', fname)
out_fname = os.path.join(
'livre',
'{}.html'.format(os.path.splitext(os.path.basename(fname))[0]))
call_str = 'pandoc -s -c ../www/style.css --mathjax ' \
'--template www/book-template.html ' \
'--include-after-body www/sidebar.html ' \
'--include-after-body www/footer.html ' \
'{} -o {}'.format(in_fname, out_fname)
print("{}: ".format(in_fname), end='')
run(call_str)
if __name__ == '__main__':
if len(sys.argv) != 1:
print("usage: python make.py\n")
exit()
diapos()
livre()
| 30.232558 | 82 | 0.576154 |
7b25b9ec772098b6a401939f74bc6b08ca37a58b | 280 | py | Python | geosnap/tests/get_data.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | [
"BSD-3-Clause"
] | 148 | 2019-04-19T00:16:59.000Z | 2022-03-24T06:35:47.000Z | geosnap/tests/get_data.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | [
"BSD-3-Clause"
] | 178 | 2019-04-15T21:54:36.000Z | 2022-03-31T03:08:29.000Z | geosnap/tests/get_data.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | [
"BSD-3-Clause"
] | 25 | 2019-04-19T21:27:56.000Z | 2022-03-28T21:03:31.000Z | import os
from pathlib import PurePath
try:
from geosnap import io
except:
pass
path = os.getcwd()
try:
io.store_ltdb(sample=PurePath(path, 'ltdb_sample.zip'), fullcount=PurePath(path, 'ltdb_full.zip'))
io.store_ncdb(PurePath(path, "ncdb.csv"))
except:
pass | 18.666667 | 102 | 0.707143 |
7b26132c0d8b78762b805dd6438fa5d2c8d060b1 | 13,370 | py | Python | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
] | null | null | null | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
] | null | null | null | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from tqdm import tqdm
import matplotlib as mpl
# https://gist.github.com/thriveth/8560036
# Colorblind-friendly palette (see the gist linked above).
color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
               '#f781bf', '#a65628', '#984ea3',
               '#999999', '#e41a1c', '#dede00']
# Legend labels for each weighting scheme (LaTeX for the ARS variants).
labels_dict = {"ic": "IC",
               "prior": "Prior",
               "ars-1": r"$\mathrm{ARS}_{M=1}$",
               "ars-2": r"$\mathrm{ARS}_{M=2}$",
               "ars-5": r"$\mathrm{ARS}_{M=5}$",
               "ars-10": r"$\mathrm{ARS}_{M=10}$",
               "ars-20": r"$\mathrm{ARS}_{M=20}$",
               "ars-50": r"$\mathrm{ARS}_{M=50}$",
               "biased": "Biased",
               "gt": "Groundtruth",
               "is": "IS",
               "collapsed": "Collapsed"}
# Fixed color per scheme so lines are comparable across figures.
# NOTE(review): 'ars-100', 'ars-50' and 'is' all share color_cycle[8],
# 'ars-5' and 'collapsed' share color_cycle[7], and 'ars-20' uses
# matplotlib's default "C1" instead of the palette — confirm intended.
color_dict = {'gt': color_cycle[0],
              'prior': color_cycle[5],
              'ic': color_cycle[2],
              'biased': color_cycle[3],
              'ars-1': color_cycle[4],
              'ars-2': color_cycle[1],
              'ars-5': color_cycle[7],
              'ars-10': color_cycle[6],
              'ars-100': color_cycle[8],
              'ars-50': color_cycle[8],
              'is': color_cycle[8],
              'ars-20': "C1",
              "collapsed": color_cycle[7]}
########################################
## matplotlib style and configs ##
########################################
def set_size(width, fraction=1, subplots=(1, 1)):
    # Adapted from https://jwalton.info/Embed-Publication-Matplotlib-Latex/
    """Compute figure dimensions (in inches) that need no scaling in LaTeX.

    Parameters
    ----------
    width: float or str
        Document text width in pts, or one of the named presets
        ('thesis', 'beamer', 'pnas', 'aistats22').
    fraction: float, optional
        Fraction of the text width the figure should occupy.
    subplots: array-like, optional
        (rows, columns) of the subplot grid; scales the height accordingly.

    Returns
    -------
    fig_dim: tuple
        (width, height) of the figure in inches.
    """
    named_widths = {
        'thesis': 426.79135,
        'beamer': 307.28987,
        'pnas': 246.09686,
        'aistats22': 487.8225,
    }
    # A recognized preset maps to its pt width; anything else is taken as pts.
    width_pt = named_widths.get(width, width)
    inches_per_pt = 1 / 72.27  # TeX points per inch
    golden_ratio = (5 ** .5 - 1) / 2  # aesthetic height/width ratio
    fig_width_in = width_pt * fraction * inches_per_pt
    fig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])
    return (fig_width_in, fig_height_in)
########################################
## Loading from disk ##
########################################
def load_log_weights(log_weights_root, iw_mode):
    """Load the log importance weights for one inference-weighting mode.

    Expects either a cache file ``<log_weights_root>/<iw_mode>.npy`` or a
    directory ``<log_weights_root>/<iw_mode>/`` of ``*.npy`` shards.  On the
    first call the shards are concatenated into one array and cached at
    ``<log_weights_root>/<iw_mode>.npy``; later calls reuse the cache.

    Args:
        log_weights_root (str or pathlib.Path)
        iw_mode (str)

    Returns:
        np.ndarray: log importance weights
    """
    cache_file = log_weights_root / f"{iw_mode}.npy"
    shard_dir = log_weights_root / iw_mode
    assert shard_dir.exists() or cache_file.exists()
    if cache_file.exists():
        log_weights = np.load(cache_file)
    else:
        shards = [np.load(shard) for shard in shard_dir.glob("*.npy")]
        log_weights = np.concatenate(shards)
        np.save(cache_file, log_weights)
    print(f"{log_weights_root} / {iw_mode} has {len(log_weights):,} traces")
    return log_weights
########################################
## Estimators and metrics ##
########################################
def _compute_estimator_helper(log_weights, dx, estimator_func, **kwargs):
"""A helper function for computing the plotting data. It generates the
x-values and y-values of the plot. x-values is an increasing sequence of
integers, with incremens of dx and ending with N. y-values is a TxK tensor
where T is the number of trials and K is the size of x-values. The j-th
column of y-values is the estimator applied to the log_weights up to the
corresponding x-value.
Args:
log_weights (torch.FloatTensor of shape TxN): All the log importance weights
of a particular experiment.
dx (int): different between points of evaluating the estimator.
estimator_func (function): the estimator function that operates on a tensor
of shape Txn where n <= N.
**kwargs: optional additional arguments to the estimator function
"""
(T, N) = log_weights.shape
xvals = _get_xvals(end=N, dx=dx)
yvals_all = [estimator_func(log_weights[:, :x], **kwargs) for x in xvals]
yvals_all = torch.stack(yvals_all, dim=1)
return xvals, yvals_all
def _get_xvals(end, dx):
"""Returns a integer numpy array of x-values incrementing by "dx"
and ending with "end".
Args:
end (int)
dx (int)
"""
arange = np.arange(0, end-1+dx, dx, dtype=int)
xvals = arange[1:]
return xvals
def _log_evidence_func(arr):
"""Returns an estimate of the log evidence from a set of log importance wegiths
in arr. arr has shape TxN where T is the number of trials and N is the number
of samples for estimation.
Args:
arr (torch.FloatTensor of shape TxN): log importance weights
Returns:
A tensor of shape (T,) representing the estimates for each set of sampels.
"""
T, N = arr.shape
log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
return log_evidence
def _ess_func(arr):
"""Effective sample size (ESS)"""
a = torch.logsumexp(arr, dim=1) * 2
b = torch.logsumexp(2 * arr, dim=1)
return torch.exp(a - b)
def _ess_inf_func(arr):
"""ESS-infinity (Q_n)"""
a = torch.max(arr, dim=1)[0]
b = torch.logsumexp(arr, dim=1)
return torch.exp(a - b)
def get_ness(log_weights, dx):
    """Normalized ESS: the ESS curve divided pointwise by the sample count."""
    xvals, ess_vals = get_ess(log_weights, dx=dx)
    normalized = ess_vals / xvals
    return xvals, normalized
########################################
## Plotting functions ##
########################################
def _lineplot_helper(*, name, func, ax, log_weights_dict, iw_mode_list, dx, bias=None, **kwargs):
    """Draw one line (mean +/- one std band) per iw mode on ``ax``.

    Args:
        name (string): metric name; used for progress bar / log output only.
        func (function): metric function taking (log_weights, dx) and returning
            (x-values, TxK y-values).
        ax (matplotlib.axes): axes the lines are drawn into.
        log_weights_dict (dict): maps iw_mode -> TxN log importance weights.
        iw_mode_list (list): iw modes in the order the lines are drawn.
        dx (int): distance between consecutive x-values.
        bias (float, optional): if given, subtracted from every mean curve.
        **kwargs: NOTE(review): accepted but not forwarded to ``func`` — confirm
            whether that is intended.
    """
    for mode in tqdm(iw_mode_list, desc=name):
        if mode not in log_weights_dict:
            print(f"Skipping {mode}.")
            continue
        weights = torch.tensor(log_weights_dict[mode])
        xs, ys_all = func(weights, dx=dx)
        mean_curve = ys_all.mean(dim=0)
        std_curve = ys_all.std(dim=0)
        if bias is not None:
            mean_curve = mean_curve - bias
        ax.plot(xs, mean_curve, color=color_dict[mode], label=labels_dict[mode])
        ax.fill_between(xs, mean_curve - std_curve, mean_curve + std_curve,
                        color=color_dict[mode], alpha=0.2)
        print(f"> ({name}) {mode, mean_curve[-1].item(), std_curve[-1].item()}")
| 35.558511 | 128 | 0.618624 |
7b28352f856a9eaa1fa2b24d293fcd81d28eb11c | 4,750 | py | Python | dfa/visualize.py | garyzhao/FRGAN | 8aeb064fc93b45d3d8e074c5253b4f7a287582f4 | [
"Apache-2.0"
] | 39 | 2018-07-28T04:37:48.000Z | 2022-01-20T18:34:37.000Z | dfa/visualize.py | garyzhao/FRGAN | 8aeb064fc93b45d3d8e074c5253b4f7a287582f4 | [
"Apache-2.0"
] | 2 | 2018-08-27T08:19:22.000Z | 2019-08-16T09:15:34.000Z | dfa/visualize.py | garyzhao/FRGAN | 8aeb064fc93b45d3d8e074c5253b4f7a287582f4 | [
"Apache-2.0"
] | 8 | 2018-07-31T09:33:49.000Z | 2020-12-06T10:16:53.000Z | from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import matplotlib.pyplot as plt
from .face import compute_bbox_size
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
def plot_kpt(image, kpt):
    ''' Draw 68 key points on a copy of the image.

    Each point is drawn as a small circle; consecutive points within the
    same sub-contour are joined by a line, and no line crosses a contour
    boundary (see end_list).

    Args:
        image: the input image
        kpt: (68, 3); only the first two columns (x, y) are used.
    '''
    canvas = image.copy()
    points = np.round(kpt).astype(np.int32)
    for idx in range(points.shape[0]):
        x, y = points[idx, 0], points[idx, 1]
        canvas = cv2.circle(canvas, (x, y), 1, (0, 0, 255), 2)
        if idx in end_list:
            continue
        nxt = points[idx + 1, :2]
        canvas = cv2.line(canvas, (x, y), (nxt[0], nxt[1]), (255, 255, 255), 1)
    return canvas
def plot_pose_box(image, Ps, pts68s, color=(40, 255, 0), line_width=2):
    ''' Draw a 3D box as annotation of pose. Ref:https://github.com/yinguobing/head-pose-estimation/blob/master/pose_estimator.py

    Args:
        image: the input image (drawn on a copy; the original is untouched).
        Ps: a single (3, 4) affine camera matrix, or a list of them.
        pts68s: a single (2, 68) or (3, 68) landmark array, or a list of them
            (one per camera matrix).
    '''
    image = image.copy()
    # Accept both a single face and a list of faces.
    if not isinstance(pts68s, list):
        pts68s = [pts68s]
    if not isinstance(Ps, list):
        Ps = [Ps]
    for i in range(len(pts68s)):
        pts68 = pts68s[i]
        # Box edge length scaled from the landmark bounding box; the box
        # corners come from build_camera_box (defined elsewhere in this file).
        llength = compute_bbox_size(pts68)
        point_3d = build_camera_box(llength)
        P = Ps[i]
        # Map to 2d image points
        point_3d_homo = np.hstack((point_3d, np.ones([point_3d.shape[0], 1]))) # n x 4
        point_2d = point_3d_homo.dot(P.T)[:, :2]
        # Flip y (image coordinates grow downwards), then re-center the box on
        # the mean of the first 27 landmarks (face contour + brows).
        point_2d[:, 1] = - point_2d[:, 1]
        point_2d[:, :2] = point_2d[:, :2] - np.mean(point_2d[:4, :2], 0) + np.mean(pts68[:2, :27], 1)
        point_2d = np.int32(point_2d.reshape(-1, 2))
        # Draw all the lines: the closed polyline for the base plus the three
        # remaining edges connecting base corners to top corners.
        cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[1]), tuple(
            point_2d[6]), color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[2]), tuple(
            point_2d[7]), color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[3]), tuple(
            point_2d[8]), color, line_width, cv2.LINE_AA)
    return image
def draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):
    """Render facial landmarks on top of an image with matplotlib.

    Args:
        img: image array; displayed with channels reversed (``img[:, :, ::-1]``).
        pts: a single landmark array (rows = coordinates, 68 columns) or a
            tuple/list of such arrays.
        style: 'simple' (green dots) or 'fancy' (contour lines + markers).
        wfp: optional output path; when given the figure is saved (dpi=200).
        show_flg: when True, ``plt.show()`` is called at the end.
        **kwargs: 'color' and 'markeredgecolor' overrides for the fancy style.
    """
    plt.imshow(img[:, :, ::-1])
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
    plt.axis('off')

    if not type(pts) in [tuple, list]:
        pts = [pts]

    for landmark in pts:
        if style == 'simple':
            plt.plot(landmark[0, :], landmark[1, :], 'o', markersize=4, color='g')
        elif style == 'fancy':
            alpha = 0.8
            marker_size = 4
            line_width = 1.5
            color = kwargs.get('color', 'w')
            edge_color = kwargs.get('markeredgecolor', 'black')
            # Boundaries between the sub-contours of the 68-point layout.
            segment_bounds = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]

            def close_loop(a, b):
                # Connect the last point of a closed contour (eyes, mouth)
                # back to its first point.
                plt.plot([landmark[0, a], landmark[0, b]],
                         [landmark[1, a], landmark[1, b]],
                         color=color, lw=line_width, alpha=alpha - 0.1)

            close_loop(41, 36)
            close_loop(47, 42)
            close_loop(59, 48)
            close_loop(67, 60)

            for lo, hi in zip(segment_bounds[:-1], segment_bounds[1:]):
                plt.plot(landmark[0, lo:hi], landmark[1, lo:hi],
                         color=color, lw=line_width, alpha=alpha - 0.1)
                plt.plot(landmark[0, lo:hi], landmark[1, lo:hi],
                         marker='o', linestyle='None', markersize=marker_size,
                         color=color, markeredgecolor=edge_color, alpha=alpha)

    if wfp is not None:
        plt.savefig(wfp, dpi=200)
        print('Save visualization result to {}'.format(wfp))
    if show_flg:
        plt.show()
7b2c39567282edd435ce6c7b2d8bdb6da59671bf | 439 | py | Python | bin/curvature.py | AgeYY/prednet | 90668d98b88e29bbaa68a7709e4fcb3664c110e8 | [
"MIT"
] | null | null | null | bin/curvature.py | AgeYY/prednet | 90668d98b88e29bbaa68a7709e4fcb3664c110e8 | [
"MIT"
] | null | null | null | bin/curvature.py | AgeYY/prednet | 90668d98b88e29bbaa68a7709e4fcb3664c110e8 | [
"MIT"
] | null | null | null | # calculate the curverture
import numpy as np
import matplotlib.pyplot as plt
from predusion.tools import curvature
# Sample n_point points, uniformly spaced in angle, on a circle of the given
# radius. A circle has constant curvature 1/radius everywhere, which makes
# this a convenient sanity check for predusion.tools.curvature.
radius = 2
n_point = 10
circle_curve = [[radius * np.sin(t), radius * np.cos(t)] for t in np.linspace(0, 2 * np.pi, n_point, endpoint=False)]
circle_curve = np.array(circle_curve)
#plt.figure()
#plt.scatter(circle_curve[:, 0], circle_curve[:, 1])
#plt.show()
# ct appears to be per-point curvature and ct_mean their mean — assumed from
# the names; TODO confirm against predusion.tools.curvature's actual contract.
ct, ct_mean = curvature(circle_curve)
print(ct, ct_mean)
| 20.904762 | 117 | 0.724374 |
7b2c3dcb95bb9538fdb4cb9f25daeb1cf42bc3eb | 875 | py | Python | cocos/tests/test_numerics/test_statistics/test_mean.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
] | 101 | 2019-03-30T05:23:01.000Z | 2021-11-27T09:09:40.000Z | cocos/tests/test_numerics/test_statistics/test_mean.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
] | 3 | 2019-04-17T06:04:12.000Z | 2020-12-14T17:36:01.000Z | cocos/tests/test_numerics/test_statistics/test_mean.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
] | 5 | 2020-02-07T14:29:50.000Z | 2020-12-09T17:54:07.000Z | import cocos.device
import cocos.numerics as cn
import numpy as np
import pytest
# Shared 3x3 input matrices covering three dtypes (int32, float32, complex64)
# so the tests exercise integer, floating-point and complex arithmetic.
test_data = [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 20]],
             dtype=np.int32),
             np.array([[0.2, 1.0, 0.5], [0.4, 0.5, 0.6], [0.7, 0.2, 0.25]],
             dtype=np.float32),
             np.array([[0.5, 2.3, 3.1], [4, 5.5, 6], [7 - 9j, 8 + 1j, 2 + 10j]],
             dtype=np.complex64)]
| 26.515152 | 80 | 0.537143 |
7b2f67783a54c7281fccbf52bb33f6fc8f65fc62 | 482 | py | Python | tests/individual_samples/long_doc.py | MiWeiss/docstr_coverage | 502ab0174ea261383f497af2476317d4cc199665 | [
"MIT"
] | 50 | 2019-01-25T16:53:39.000Z | 2022-03-17T22:02:06.000Z | tests/individual_samples/long_doc.py | HunterMcGushion/docstr_coverage | 502ab0174ea261383f497af2476317d4cc199665 | [
"MIT"
] | 66 | 2019-01-25T11:45:43.000Z | 2022-03-30T11:55:47.000Z | tests/individual_samples/long_doc.py | MiWeiss/docstr_coverage | 502ab0174ea261383f497af2476317d4cc199665 | [
"MIT"
] | 23 | 2019-01-28T08:37:42.000Z | 2021-06-16T12:35:27.000Z | """
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
"""
| 20.083333 | 65 | 0.707469 |
7b2fdc657bc9709a4e827c864106583a0abe59bc | 461 | py | Python | Lib/site-packages/elasticsearch_django/signals.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/elasticsearch_django/signals.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/elasticsearch_django/signals.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
] | null | null | null | import django.dispatch
# signal fired just before calling model.index_search_document
pre_index = django.dispatch.Signal(providing_args=["instance", "index"])
# signal fired just before calling model.update_search_document
pre_update = django.dispatch.Signal(
providing_args=["instance", "index", "update_fields"]
)
# signal fired just before calling model.delete_search_document
pre_delete = django.dispatch.Signal(providing_args=["instance", "index"])
| 35.461538 | 73 | 0.796095 |
7b30e1e10fc484e48de9eae99bc4b49a95428432 | 528 | py | Python | adverse/signals.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
] | null | null | null | adverse/signals.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
] | null | null | null | adverse/signals.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
] | null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from communique.utils.utils_signals import generate_notifications
from user.models import NotificationRegistration
from .models import AdverseEvent
| 37.714286 | 103 | 0.829545 |
7b32ae7712bef36c9a2b8c71ee2035133eed9f7e | 1,117 | py | Python | hoomd/test-py/test_run_callback.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | 2 | 2020-03-30T14:38:50.000Z | 2020-06-02T05:53:41.000Z | hoomd/test-py/test_run_callback.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/test-py/test_run_callback.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | 1 | 2020-05-20T07:00:08.000Z | 2020-05-20T07:00:08.000Z | # -*- coding: iso-8859-1 -*-
# Maintainer: joaander
import hoomd
hoomd.context.initialize()
import unittest
# Run this module's tests with verbose output when executed directly.
if __name__ == '__main__':
    unittest.main(argv = ['test.py', '-v'])
| 23.270833 | 76 | 0.521038 |
7b332b95f4298d84e9d671c6d88abc96e79fcae6 | 7,145 | py | Python | cheshire3/parser.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 3 | 2015-08-02T09:03:28.000Z | 2017-12-06T09:26:14.000Z | cheshire3/parser.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 5 | 2015-08-17T01:16:35.000Z | 2015-09-16T21:51:27.000Z | cheshire3/parser.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 6 | 2015-05-17T15:32:20.000Z | 2020-04-22T08:43:16.000Z |
import cStringIO
import StringIO
from xml.sax import make_parser, ErrorHandler, SAXParseException
from xml.sax import InputSource as SaxInput
from xml.dom.minidom import parseString as domParseString
from xml.parsers.expat import ExpatError
from lxml import etree
from cheshire3.baseObjects import Parser
from cheshire3.record import (
SaxRecord,
SaxContentHandler,
DomRecord,
MinidomRecord,
MarcRecord
)
from cheshire3.record import LxmlRecord
from cheshire3.utils import nonTextToken
from exceptions import XMLSyntaxError
| 31.065217 | 77 | 0.588383 |
9e26ff289e7c1f363b136e3f4b93da4585664e71 | 6,275 | py | Python | scripts/checkpT_curv.py | masamuch/hepqpr-qallse | 0b39f8531c6f3c758b94c31f4633f75dcfeb67ad | [
"Apache-2.0"
] | null | null | null | scripts/checkpT_curv.py | masamuch/hepqpr-qallse | 0b39f8531c6f3c758b94c31f4633f75dcfeb67ad | [
"Apache-2.0"
] | null | null | null | scripts/checkpT_curv.py | masamuch/hepqpr-qallse | 0b39f8531c6f3c758b94c31f4633f75dcfeb67ad | [
"Apache-2.0"
] | null | null | null | from hepqpr.qallse import *
from hepqpr.qallse.plotting import *
from hepqpr.qallse.cli.func import time_this
import time
import pickle
# import the method
from hepqpr.qallse.dsmaker import create_dataset
modelName = "D0"
#modelName = "Mp"
#modelName = "Doublet"
maxTry=1
# 5e-3 : 167 MeV
# 8e-4 : 1.04 GeV
varDensity = []
for ptThr_w in [0.15, 0.20, 0.30, 0.4, 0.50, 0.6, 0.75, 0.9, 1.0, 1.2]:
for ptThr_r in [3e-4, 3.5e-4, 4e-4, 4.5e-4, 5e-4, 6e-4, 7e-4, 8e-4, 9e-4, 1e-3, 1.2e-3, 1.5e-3, 1.7e-3, 2e-3, 2.5e-3, 3e-3, 4e-3, 5e-3]:
varDensity.append((modelName, ptThr_w, ptThr_r, maxTry))
#varDensity = [
# (modelName, 0.20, 5e-3, maxTry),
# (modelName, 1.00, 5e-3, maxTry),
#
#]
# Results cache: re-running the script resumes from previously completed
# configurations stored in this pickle file.
picklename = ".tmp.checkpT_curv.pickle"
try:
  with open(picklename, 'rb') as f:
    results = pickle.load(f)
except Exception:
  # Narrowed from a bare `except:`: a missing or unreadable cache should
  # simply start a fresh sweep, but KeyboardInterrupt/SystemExit must not
  # be swallowed.
  print ("No pickle files.")
  results = {}
for v in varDensity:
nTry = v[3]
for iTry in range(nTry):
k = (v[0], v[1], v[2], iTry)
print (k)
ModelName = k[0]
ptThr_w = k[1]
ptThr_r = k[2]
Density = 0.05
if k in results:
continue
results[k] = {}
results[k]["density"] = Density
results[k]["ptThr_w"] = ptThr_w
results[k]["ptThr_r"] = ptThr_r
results[k]["ModelName"] = ModelName
# dataset creation options
ds_options = dict(
# output directory: output_path+prefix
output_path='/tmp',
#prefix='ds_'+k,
#prefix=prefix,
# size
density = Density,
#phi_bounds = (0.15, 1.05),
# important: no pt cut
high_pt_cut = ptThr_w,
)
prefix = f'ez-{Density}'
if ds_options["high_pt_cut"] > 0:
prefix += f'_hpt-{ds_options["high_pt_cut"]}'
else:
prefix += '_baby'
prefix += f'_{iTry}'
prefix += f'_noPhiCut'
ds_options["prefix"] = prefix
# generate the dataset
import os
path = os.path.join(ds_options['output_path'], prefix, "event000001000")
if os.path.exists(path + "-hits.csv"):
import json
with open(path + "-meta.json") as f:
meta = json.load(f)
with open(path+"-metaHits.pickle", 'rb') as f:
time_info= pickle.load(f)
else:
with time_this() as time_info:
meta, path = create_dataset(**ds_options)
with open(os.path.join(path+"-metaHits.pickle"), 'wb') as f:
pickle.dump(time_info, f)
results[k]['TReadingHits'] = time_info[1]
results[k]['meta']=meta
from hepqpr.qallse.seeding import generate_doublets, SeedingConfig
# generate the doublets: the important part is the config_cls !
if os.path.exists(path + "-doublets.csv"):
doublets = pd.read_csv(path + "-doublets.csv", index_col=0)
results[k]['TInitialDoubletBuilding'] = time_info[1]
with open(path+"-metaDoublets.pickle", 'rb') as f:
time_info= pickle.load(f)
else:
with time_this() as time_info:
doublets = generate_doublets(hits_path=path+'-hits.csv', config_cls=SeedingConfig)
doublets.to_csv(path+'-doublets.csv')
with open(os.path.join(path+"-metaDoublets.pickle"), 'wb') as f:
pickle.dump(time_info, f)
results[k]['TInitialDoubletBuilding'] = time_info[1]
print('number of doublets = ', len(doublets))
results[k]['Ndoublets'] = len(doublets)
from hepqpr.qallse.qallse import Config
config = Config()
config.tplet_max_curv = ptThr_r
dw = DataWrapper.from_path(path + '-hits.csv')
if modelName == "D0":
from hepqpr.qallse.qallse_d0 import D0Config
new_config = merge_dicts(D0Config().as_dict(), config.as_dict())
model = QallseD0(dw, **new_config)
elif modelName == "Mp":
from hepqpr.qallse.qallse_mp import MpConfig
new_config = merge_dicts(MpConfig().as_dict(), config.as_dict())
model = QallseMp(dw, **new_config)
elif modelName == "Nominal":
from hepqpr.qallse.qallse import Config1GeV
new_config = merge_dicts(Config1GeV().as_dict(), config.as_dict())
model = Qallse1GeV(dw, **new_config)
elif modelName == "Doublet":
from hepqpr.qallse.qallse_doublet import DoubletConfig
new_config = merge_dicts(DoubletConfig().as_dict(), config.as_dict())
model = QallseDoublet(dw, **new_config)
p, r, ms = model.dataw.compute_score(doublets)
results[k]['precision_initDoublet'] = p
results[k]['recall_initDoublet'] = r
results[k]['missing_initDoublet'] = len(ms)
# generate the qubo as usual
with time_this() as time_info:
model.build_model(doublets)
print(f'Time of model building = {time_info[1]:.2f}s.')
results[k]['TModelBuilding'] = time_info[1]
with time_this() as time_info:
Q = model.to_qubo()
print(f'Time of qubo building = {time_info[1]:.2f}s.')
results[k]['TQuboBuilding'] = time_info[1]
results[k]['QuboSize'] = len(Q)
from hepqpr.qallse.cli.func import *
with time_this() as time_info:
response = solve_neal(Q)
print(f'Time of neal = {time_info[1]:.2f}s.')
results[k]['TNeal'] = time_info[1]
final_doublets, final_tracks = process_response(response)
en0 = 0 if Q is None else dw.compute_energy(Q)
en = response.record.energy[0]
results[k]['obsEnergy'] = en
results[k]['idealEnergy'] = en0
occs = response.record.num_occurrences
results[k]['bestOcc'] = occs[0]
results[k]['OccSum'] = occs.sum()
p, r, ms = dw.compute_score(final_doublets)
results[k]['precision'] = p
results[k]['recall'] = r
results[k]['missing'] = len(ms)
trackml_score = dw.compute_trackml_score(final_tracks)
results[k]['trackmlScore'] = trackml_score
with open(picklename, 'wb') as f:
pickle.dump(results, f)
#print(results)
| 35.055866 | 140 | 0.577211 |
9e27be8d3067835dcbda95c1548885176ae1ebf3 | 440 | py | Python | ifconfigparser/__init__.py | KnightWhoSayNi/ifconfig-parser | 4921ac9d6be6244b062d082c164f5a5e69522478 | [
"MIT"
] | 17 | 2018-10-06T15:19:27.000Z | 2022-02-25T05:05:22.000Z | ifconfigparser/__init__.py | KnightWhoSayNi/ifconfig-parser | 4921ac9d6be6244b062d082c164f5a5e69522478 | [
"MIT"
] | 3 | 2019-11-22T23:40:58.000Z | 2019-12-06T02:26:59.000Z | ifconfigparser/__init__.py | KnightWhoSayNi/ifconfig-parser | 4921ac9d6be6244b062d082c164f5a5e69522478 | [
"MIT"
] | 2 | 2019-05-10T15:36:46.000Z | 2020-11-18T11:56:33.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================
#
# File name: __init__.py
# Author: threeheadedknight@protonmail.com
# Date created: 30.06.2018 17:00
# Python Version: 3.7
#
# ======================================================
from .ifconfig_parser import IfconfigParser
__author__ = "KnightWhoSayNi"
__email__ = 'threeheadedknight@protonmail.com'
__version__ = '0.0.5'
| 25.882353 | 56 | 0.522727 |
9e287d153cff7385984c9cc16aca63539ed882d4 | 3,382 | py | Python | api/views/movies.py | iamvukasin/filminds | 54c9d7175f3a06f411cc750a694758bd683af1ee | [
"MIT"
] | 2 | 2019-06-15T01:40:04.000Z | 2019-12-19T05:11:17.000Z | api/views/movies.py | iamvukasin/filminds | 54c9d7175f3a06f411cc750a694758bd683af1ee | [
"MIT"
] | 1 | 2021-03-09T05:22:51.000Z | 2021-03-09T05:22:51.000Z | api/views/movies.py | iamvukasin/filminds | 54c9d7175f3a06f411cc750a694758bd683af1ee | [
"MIT"
] | 2 | 2019-06-24T19:24:25.000Z | 2020-05-29T13:57:35.000Z | from abc import ABC, abstractmethod
import tmdbsimple as tmdb
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import MovieSerializer
from app.models import Movie, SearchedMovie, User, CollectedMovie
MAX_NUM_CASTS = 4
| 27.274194 | 119 | 0.646363 |
9e29911c2cf893692ea46e7dbded4b692a9e33a0 | 3,853 | py | Python | apps/lk/views.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
] | null | null | null | apps/lk/views.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
] | null | null | null | apps/lk/views.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
] | 1 | 2021-02-15T17:40:23.000Z | 2021-02-15T17:40:23.000Z | from django.shortcuts import render
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from apps.index.models import User, UserHistory
from sova_avia.settings import MEDIA_ROOT
from imageai.Prediction import ImagePrediction
import json
from .models import Article
from .forms import ArticleForm
| 35.675926 | 125 | 0.659227 |
9e2d53249be23d06d560e65260043ec473bab942 | 1,159 | py | Python | setup.py | CZ-NIC/deckard | 35ed3c59b27c52fc2e3a187679251353f5efe6c0 | [
"BSD-2-Clause"
] | 30 | 2016-08-06T20:56:17.000Z | 2021-12-13T07:56:23.000Z | setup.py | CZ-NIC/deckard | 35ed3c59b27c52fc2e3a187679251353f5efe6c0 | [
"BSD-2-Clause"
] | 6 | 2016-05-31T10:48:51.000Z | 2018-07-03T09:05:12.000Z | setup.py | CZ-NIC/deckard | 35ed3c59b27c52fc2e3a187679251353f5efe6c0 | [
"BSD-2-Clause"
] | 10 | 2016-04-03T13:55:19.000Z | 2020-11-28T01:23:49.000Z | #!/usr/bin/env python3
from distutils.core import setup
version = '3.0'
setup(
name='deckard',
version=version,
description='DNS toolkit',
long_description=(
"Deckard is a DNS software testing based on library pydnstest."
"It supports parsing and running Unbound-like test scenarios,"
"and setting up a mock DNS server. It's based on dnspython."),
author='CZ.NIC',
author_email='knot-dns-users@lists.nic.cz',
license='BSD',
url='https://gitlab.labs.nic.cz/knot/deckard',
packages=['pydnstest'],
python_requires='>=3.5',
install_requires=[
'dnspython>=1.15',
'jinja2',
'PyYAML',
'python-augeas'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only'
'Operating System :: POSIX :: Linux',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
]
)
| 31.324324 | 71 | 0.609146 |
9e2f62d9ca279a2304c666233677d5d0d663e572 | 1,894 | py | Python | tests/testing_utils.py | alguerre/TrackEditorWeb | e92cb8554e804af8620298ca75567e6ce653b15e | [
"MIT"
] | 1 | 2021-09-06T14:56:27.000Z | 2021-09-06T14:56:27.000Z | tests/testing_utils.py | qjx666/TrackEditorWeb | e92cb8554e804af8620298ca75567e6ce653b15e | [
"MIT"
] | 79 | 2021-07-06T13:37:09.000Z | 2021-10-21T11:09:10.000Z | tests/testing_utils.py | qjx666/TrackEditorWeb | e92cb8554e804af8620298ca75567e6ce653b15e | [
"MIT"
] | 1 | 2022-01-30T05:44:25.000Z | 2022-01-30T05:44:25.000Z | import os
from urllib.parse import urljoin
from selenium import webdriver
from TrackApp.models import User, Track
from libs import track
| 30.548387 | 71 | 0.661563 |
9e30175d2516252b61b551241d3a7d897279d318 | 1,563 | py | Python | SimulEval/simuleval/agents/agent.py | ashkanalinejad/Supervised-Simultaneous-MT | d09397ed86bbf4133d5d9b906030a8881ee4c13f | [
"MIT"
] | 2 | 2022-01-11T19:27:11.000Z | 2022-01-12T11:06:53.000Z | SimulEval/simuleval/agents/agent.py | sfu-natlang/Supervised-Simultaneous-MT | 12c3a53887c985ae24199ecef2f7b2335fe214c6 | [
"MIT"
] | 1 | 2022-02-12T03:02:52.000Z | 2022-02-12T04:27:10.000Z | SimulEval/simuleval/agents/agent.py | sfu-natlang/Supervised-Simultaneous-MT | 12c3a53887c985ae24199ecef2f7b2335fe214c6 | [
"MIT"
] | 1 | 2022-02-27T14:22:36.000Z | 2022-02-27T14:22:36.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from simuleval.states import TextStates, SpeechStates
| 26.948276 | 76 | 0.662828 |
9e301c912b42abb46c781523b9340a9c6ccd01d4 | 13,317 | py | Python | source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py | aws-samples/aws-media-replay-engine-samples | d9b479f3c7da87c8b6d2a265334a6d3aae58d885 | [
"MIT-0"
] | 4 | 2022-02-03T17:23:19.000Z | 2022-03-16T13:13:09.000Z | source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py | aws-samples/aws-media-replay-engine-samples | d9b479f3c7da87c8b6d2a265334a6d3aae58d885 | [
"MIT-0"
] | 1 | 2022-02-22T01:25:57.000Z | 2022-03-10T21:27:31.000Z | source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py | aws-samples/aws-media-replay-engine-samples | d9b479f3c7da87c8b6d2a265334a6d3aae58d885 | [
"MIT-0"
] | 1 | 2022-02-16T02:23:43.000Z | 2022-02-16T02:23:43.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import json
import sys
import time
import ffmpeg
from MediaReplayEnginePluginHelper import OutputHelper
from MediaReplayEnginePluginHelper import Status
from MediaReplayEnginePluginHelper import DataPlane
s3_client = boto3.client('s3')
| 39.283186 | 139 | 0.535181 |
9e316afea9883b374b2578dfd94ecad511320c5f | 1,567 | py | Python | chempy/kinetics/tests/test_integrated.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | [
"BSD-2-Clause"
] | null | null | null | chempy/kinetics/tests/test_integrated.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | [
"BSD-2-Clause"
] | null | null | null | chempy/kinetics/tests/test_integrated.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import division
from chempy.util.testing import requires
from ..integrated import pseudo_irrev, pseudo_rev, binary_irrev, binary_rev
import pytest
try:
import sympy
except ImportError:
sympy = None
else:
one = sympy.S(1)
t, kf, kb, prod, major, minor = sympy.symbols(
't kf kb prod major minor', negative=False, nonnegative=True, real=True)
subsd = {t: one*2, kf: one*3, kb: one*7, major: one*11,
minor: one*13, prod: one*0}
| 27.017241 | 81 | 0.640715 |
9e3410f7e06e468d0eb7d1e58add77993b4f9819 | 1,362 | py | Python | emulateHttp2/processTestByBrowser.py | mixianghang/newhttp2 | 0843301ad79d11bc43f5d70dbcf934aaf072f6a3 | [
"MIT"
] | null | null | null | emulateHttp2/processTestByBrowser.py | mixianghang/newhttp2 | 0843301ad79d11bc43f5d70dbcf934aaf072f6a3 | [
"MIT"
] | null | null | null | emulateHttp2/processTestByBrowser.py | mixianghang/newhttp2 | 0843301ad79d11bc43f5d70dbcf934aaf072f6a3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import os
import shutil
if __name__ == "__main__":
main()
| 32.428571 | 123 | 0.660793 |
9e36180ad2d9abb3875f4262a27e459d07a15a75 | 1,097 | py | Python | setup.py | osism/netbox-plugin-osism | 8cba95bd6bed167c5a05d464d95246c9d4c98a6a | [
"Apache-2.0"
] | null | null | null | setup.py | osism/netbox-plugin-osism | 8cba95bd6bed167c5a05d464d95246c9d4c98a6a | [
"Apache-2.0"
] | null | null | null | setup.py | osism/netbox-plugin-osism | 8cba95bd6bed167c5a05d464d95246c9d4c98a6a | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
setup(
name='netbox_plugin_osism',
version='0.0.1',
description='NetBox Plugin OSISM',
long_description='Netbox Plugin OSISM',
url='https://github.com/osism/netbox-plugin-osism',
download_url='https://github.com/osism/netbox-plugin-osism',
author='OSISM GmbH',
author_email='info@osism.tech',
maintainer='OSISM GmbH',
maintainer_email='info@osism.tech',
install_requires=[],
packages=['netbox_plugin_osism'],
package_data={
'netbox_plugin_osism':
['templates/netbox_plugin_osism/*.html']
},
include_package_data=True,
zip_safe=False,
platforms=['Any'],
keywords=['netbox', 'netbox-plugin'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Intended Audience :: Developers',
'Environment :: Console',
],
)
| 31.342857 | 64 | 0.631723 |
9e36f2c784f6f44bd775bdedd2272a8be3601516 | 525 | py | Python | src/response.py | vcokltfre/snowflake.vcokltf.re | 5b8324a4fbc2e512dbc263d4ed65edb89d72a549 | [
"MIT"
] | 1 | 2021-03-23T15:13:04.000Z | 2021-03-23T15:13:04.000Z | src/response.py | vcokltfre/snowflake.vcokltf.re | 5b8324a4fbc2e512dbc263d4ed65edb89d72a549 | [
"MIT"
] | null | null | null | src/response.py | vcokltfre/snowflake.vcokltf.re | 5b8324a4fbc2e512dbc263d4ed65edb89d72a549 | [
"MIT"
] | null | null | null | from starlette.responses import HTMLResponse
| 23.863636 | 82 | 0.485714 |
9e377bb8273400c9545a16768897adf2638f5e45 | 63 | py | Python | rx/__init__.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | rx/__init__.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | rx/__init__.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-08T08:23:08.000Z | 2020-05-08T08:23:08.000Z | from .core import Observer, Observable, AnonymousObserver as _
| 31.5 | 62 | 0.825397 |
9e379e1fd1991982e0f968b5ef6aafe42d277ba1 | 47 | py | Python | news_api/settings/Vespa_config.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
] | 9 | 2019-07-19T13:19:55.000Z | 2021-07-08T16:25:30.000Z | news_api/settings/Vespa_config.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
] | null | null | null | news_api/settings/Vespa_config.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
] | 1 | 2021-05-12T01:50:04.000Z | 2021-05-12T01:50:04.000Z | VESPA_IP = "172.16.100.65"
VESPA_PORT = "8080"
| 15.666667 | 26 | 0.680851 |
9e39c8fbaaf037c97de86567d3d6ad2bfa09867d | 642 | py | Python | test/walk.py | manxueitp/cozmo-test | a91b1a4020544cb622bd67385f317931c095d2e8 | [
"MIT"
] | null | null | null | test/walk.py | manxueitp/cozmo-test | a91b1a4020544cb622bd67385f317931c095d2e8 | [
"MIT"
] | null | null | null | test/walk.py | manxueitp/cozmo-test | a91b1a4020544cb622bd67385f317931c095d2e8 | [
"MIT"
] | null | null | null | import cozmo
from cozmo.util import distance_mm, speed_mmps,degrees
cozmo.run_program(cozmo_program)
| 45.857143 | 79 | 0.800623 |
9e3a0239409f0db941b17e1b31a07a8a3ed673cb | 694 | py | Python | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
] | 4 | 2019-11-15T15:21:27.000Z | 2020-07-08T15:04:30.000Z | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
] | 9 | 2019-11-18T15:54:36.000Z | 2020-07-14T13:56:53.000Z | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
] | 3 | 2021-01-25T15:41:30.000Z | 2021-09-21T08:51:36.000Z | #!/usr/bin/env python
# Krzysztof Kosiski 2014
"""
Detect the Clang C compiler
"""
from waflib.Configure import conf
from waflib.Tools import ar
from waflib.Tools import ccroot
from waflib.Tools import gcc
| 22.387097 | 72 | 0.693084 |
9e3b5a48a7befde960b0ddd0c42b6f209d9a2b77 | 457 | py | Python | test_lambda_function.py | gavinbull/loyalty_anagram | a91d23083d8c040916733751932fb47d00592890 | [
"MIT"
] | null | null | null | test_lambda_function.py | gavinbull/loyalty_anagram | a91d23083d8c040916733751932fb47d00592890 | [
"MIT"
] | null | null | null | test_lambda_function.py | gavinbull/loyalty_anagram | a91d23083d8c040916733751932fb47d00592890 | [
"MIT"
] | null | null | null | import unittest
from lambda_function import gather_anagrams
if __name__ == '__main__':
unittest.main()
| 28.5625 | 86 | 0.654267 |
9e3d9a4ab5c166e9fe2b7e4de49e51e3488a6de5 | 577 | py | Python | euler62.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
] | null | null | null | euler62.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
] | null | null | null | euler62.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
] | null | null | null | '''
Find the smallest cube for which exactly five permutations of its digits are cube.
'''
import math, itertools
print(math.pow(8, 1/3).is_integer())
tried = {}
for i in range(1000, 1200):
cb = int(math.pow(i, 3))
#print(cb)
#print(math.pow(int(cb), 1/3))
roots = 1
tried[i] = [str(cb)]
for x in itertools.permutations(str(cb)):
x = ''.join(x)
if x not in tried[i]:
#print('x =', x)
y = round(math.pow(int(x), 1/3))
#print(y**3, x)
if y**3 == int(x):
roots += 1
tried[i].append(x)
print(roots, i, y, x)
if roots == 5:
print(cb)
break
| 21.37037 | 82 | 0.587522 |
9e3eca14631d828c95eda787a3d066e5994ecfdb | 3,010 | py | Python | examples/reeds_problem.py | bwhewe-13/ants | 6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2 | [
"MIT"
] | null | null | null | examples/reeds_problem.py | bwhewe-13/ants | 6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2 | [
"MIT"
] | null | null | null | examples/reeds_problem.py | bwhewe-13/ants | 6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2 | [
"MIT"
] | null | null | null | from ants.medium import MediumX
from ants.materials import Materials
from ants.mapper import Mapper
from ants.multi_group import source_iteration
import numpy as np
import matplotlib.pyplot as plt
groups = 1
cells_x = 1000
medium_width = 16.
cell_width_x = medium_width / cells_x
angles = 16
xbounds = np.array([1, 0])
materials = ['reed-vacuum', 'reed-strong-source', \
'reed-scatter','reed-absorber']
problem_01 = Materials(materials, 1, None)
medium = MediumX(cells_x, cell_width_x, angles, xbounds)
medium.add_external_source("reed")
map_obj = Mapper.load_map('reed_problem2.mpr')
if cells_x != map_obj.cells_x:
map_obj.adjust_widths(cells_x)
reversed_key = {v: k for k, v in map_obj.map_key.items()}
total = []
scatter = []
fission = []
for position in range(len(map_obj.map_key)):
map_material = reversed_key[position]
total.append(problem_01.data[map_material][0])
scatter.append(problem_01.data[map_material][1])
fission.append(problem_01.data[map_material][2])
total = np.array(total)
scatter = np.array(scatter)
fission = np.array(fission)
print(map_obj.map_key.keys())
print(problem_01.data.keys())
mu_x = medium.mu_x
weight = medium.weight
print(mu_x)
print(weight)
medium_map = map_obj.map_x.astype(int)
phi = source_iteration(groups, mu_x / cell_width_x, weight, total, scatter, \
fission, medium.ex_source, medium_map, xbounds, \
cell_width_x)
print(medium.ex_source.shape)
fig, ax = plt.subplots()
solution = np.load('reed_solution.npy')
print(len(solution))
print(np.allclose(solution, phi[:,0],atol=1e-12))
ax.plot(np.linspace(0, 16, len(solution)), solution, label='solution', c='k', ls='--')
ax.plot(np.linspace(0, medium_width, cells_x), phi[:,0], label='New', c='r', alpha=0.6)
ax.legend(loc=0)
plt.show() | 29.80198 | 87 | 0.679734 |
9e40a4a7ae6fa13448f345e341c1c32845116799 | 29,411 | py | Python | exp_runner.py | BoifZ/NeuS | a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525 | [
"MIT"
] | null | null | null | exp_runner.py | BoifZ/NeuS | a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525 | [
"MIT"
] | null | null | null | exp_runner.py | BoifZ/NeuS | a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525 | [
"MIT"
] | null | null | null | import os
import time
import logging
import argparse
import numpy as np
import cv2 as cv
import trimesh
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from shutil import copyfile
from icecream import ic
from tqdm import tqdm
from pyhocon import ConfigFactory
from models.dataset import Dataset, load_K_Rt_from_P
from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF
from models.renderer import NeuSRenderer
from models.poses import LearnPose, LearnIntrin, RaysGenerator
# from models.depth import SiLogLoss
if __name__ == '__main__':
print('Hello Wooden')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='./confs/base.conf')
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--mcube_threshold', type=float, default=0.0)
parser.add_argument('--is_continue', default=False, action="store_true")
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--case', type=str, default='')
args = parser.parse_args()
torch.cuda.set_device(args.gpu)
runner = Runner(args.conf, args.mode, args.case, args.is_continue)
if args.mode == 'train':
runner.train()
elif args.mode == 'validate_mesh':
runner.validate_mesh(world_space=True, resolution=512, threshold=args.mcube_threshold)
elif args.mode.startswith('interpolate'): # Interpolate views given two image indices
_, img_idx_0, img_idx_1 = args.mode.split('_')
img_idx_0 = int(img_idx_0)
img_idx_1 = int(img_idx_1)
runner.interpolate_view(img_idx_0, img_idx_1)
elif args.mode.startswith('showcam'):
_, iter_show = args.mode.split('_')
runner.load_pnf_checkpoint(('pnf_{:0>6d}.pth').format(int(iter_show)))
runner.show_cam_pose(int(iter_show))
| 47.590615 | 180 | 0.605352 |
9e44b7345e9261d66e37f31753ad1afb6577bc5f | 2,007 | py | Python | code/video-analiz/python/camshift.py | BASARIRR/computer-vision-guide | 0a11726fb2be0cad63738ab45fd4edc4515441d2 | [
"MIT"
] | 230 | 2019-01-17T01:00:53.000Z | 2022-03-31T18:00:09.000Z | code/video-analiz/python/camshift.py | sturlu/goruntu-isleme-kilavuzu | e9377ace3823ca5f2d06ca78a11884256539134d | [
"MIT"
] | 8 | 2019-05-03T07:44:50.000Z | 2022-02-10T00:14:38.000Z | code/video-analiz/python/camshift.py | sturlu/goruntu-isleme-kilavuzu | e9377ace3823ca5f2d06ca78a11884256539134d | [
"MIT"
] | 71 | 2019-01-17T12:11:06.000Z | 2022-03-03T22:02:46.000Z | #Python v3, OpenCV v3.4.2
import numpy as np
import cv2
videoCapture = cv2.VideoCapture("video.mp4")
ret,camera_input = videoCapture.read()
rows, cols = camera_input.shape[:2]
'''
Video dosyas zerine Mean Shift iin bir alan belirlenir.
Bu koordinatlar arlkl ortalamas belirlenecek olan drtgen alandr. '''
#w ve h boyutlandrmasn deitirerek sonular gzlemleyebilirsiniz
w = 10
h = 15
col = int((cols - w) / 2)
row = int((rows - h) / 2)
shiftWindow = (col, row, w, h)
'''
imdi grnt zerindeki parlakl, renk dalmlarn dengelemek iin bir maskeleme alan oluturalm ve
bu alan zerinde histogram eitleme yapalm
'''
roi = camera_input[row:row + h, col:col + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
histogram = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(histogram,histogram,0,255,cv2.NORM_MINMAX)
'''
Bu parametre / durdurma lt algoritmann kendi ierisinde kaydrma/hesaplama ilemini ka defa yapacan belirlemektedir.
'''
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while True:
#Video'dan bir frame okunur
ret ,camera_input = videoCapture.read()
'''
video ierisinde ncelikli HSV renk uzay zerinde histogram alp histogram back projection yapacaz ve
tm grnt zerinde istediimiz yerin segmentlerini bulacaz.
'''
hsv = cv2.cvtColor(camera_input, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],histogram,[0,180],1)
#her yeni konum iin meanshift tekrar uygulanr
ret, shiftWindow = cv2.CamShift(dst, shiftWindow, term_crit)
#Grnt zerinde tespit edilen alan izelim
pts = cv2.boxPoints(ret)
pts = np.int0(pts)
result_image = cv2.polylines(camera_input,[pts],True, 255,2)
cv2.imshow('Camshift (Surekli Mean Shift) Algoritmasi', result_image)
k = cv2.waitKey(60) & 0xff
videoCapture.release()
cv2.destroyAllWindows() | 32.901639 | 125 | 0.727454 |
9e459ba91afb3134b739b9c40e6c311ac98e5335 | 346 | py | Python | DTT_files/dtt.py | stecik/Directory_to_text | f93c76f820ff7dc39e213779115861e53ed6a266 | [
"MIT"
] | null | null | null | DTT_files/dtt.py | stecik/Directory_to_text | f93c76f820ff7dc39e213779115861e53ed6a266 | [
"MIT"
] | null | null | null | DTT_files/dtt.py | stecik/Directory_to_text | f93c76f820ff7dc39e213779115861e53ed6a266 | [
"MIT"
] | null | null | null | from dtt_class import DTT
from parser import args
if __name__ == "__main__":
dtt = DTT()
# Creates a list of files and subdirectories
try:
l = dtt.dir_to_list(args.directory, args)
# Creates a .txt file with the list
dtt.list_to_txt(args.output_file, l)
except Exception as e:
print(f"Error: {e}") | 28.833333 | 49 | 0.644509 |
9e45b73d08315aaa5770ad5f620934e0e80ebd70 | 1,675 | py | Python | src/models/head.py | takedarts/DenseResNet | d5f9c143ed3c484436a2a5bac366c3795e5d47ec | [
"MIT"
] | null | null | null | src/models/head.py | takedarts/DenseResNet | d5f9c143ed3c484436a2a5bac366c3795e5d47ec | [
"MIT"
] | null | null | null | src/models/head.py | takedarts/DenseResNet | d5f9c143ed3c484436a2a5bac366c3795e5d47ec | [
"MIT"
] | null | null | null | import torch.nn as nn
import collections
| 36.413043 | 93 | 0.605373 |
9e47088047a050a5c1880fb84b394c06ebc4af2c | 968 | py | Python | application.py | nicolas-van/startup_asgard_app | acbb706256214f6758de9db92ff2988cee62c8ff | [
"MIT"
] | null | null | null | application.py | nicolas-van/startup_asgard_app | acbb706256214f6758de9db92ff2988cee62c8ff | [
"MIT"
] | null | null | null | application.py | nicolas-van/startup_asgard_app | acbb706256214f6758de9db92ff2988cee62c8ff | [
"MIT"
] | null | null | null |
from __future__ import unicode_literals, print_function, absolute_import
import flask
import os
import os.path
import json
import sjoh.flask
import logging
import asgard
app = asgard.Asgard(__name__, flask_parameters={"static_folder": None})
# load configuration about files and folders
folder = os.path.dirname(__file__)
fc = os.path.join(folder, "filesconfig.json")
with open(fc, "rb") as file_:
fc_content = file_.read().decode("utf8")
files_config = json.loads(fc_content)
# register static folders
for s in files_config["static_folders"]:
route = "/" + s + "/<path:path>"
app.web_app.add_url_rule(route, "static:"+s, gen_fct(s))
| 25.473684 | 73 | 0.722107 |
9e470dc0299f2bc08dbfaf73e95ab549a126fe53 | 414 | py | Python | build/lib/tests/visualizer_test.py | eltoto1219/vltk | e84c0efe9062eb864604d96345f71483816340aa | [
"Apache-2.0"
] | null | null | null | build/lib/tests/visualizer_test.py | eltoto1219/vltk | e84c0efe9062eb864604d96345f71483816340aa | [
"Apache-2.0"
] | null | null | null | build/lib/tests/visualizer_test.py | eltoto1219/vltk | e84c0efe9062eb864604d96345f71483816340aa | [
"Apache-2.0"
] | null | null | null | import io
import os
import unittest
import numpy as np
from PIL import Image
from vltk import SingleImageViz
PATH = os.path.dirname(os.path.realpath(__file__))
URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg"
| 18 | 107 | 0.731884 |
9e473c9d126543858d93cd7cc38a1863415d85a8 | 3,419 | py | Python | siam_tracker/models/train_wrappers/pairwise_wrapper.py | microsoft/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 28 | 2020-03-18T04:41:21.000Z | 2022-02-24T16:44:01.000Z | siam_tracker/models/train_wrappers/pairwise_wrapper.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 1 | 2020-04-05T15:23:22.000Z | 2020-04-07T16:23:12.000Z | siam_tracker/models/train_wrappers/pairwise_wrapper.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 11 | 2020-03-19T00:30:06.000Z | 2021-11-10T08:22:35.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from collections import OrderedDict
from ..builder import build_tracker, TRAIN_WRAPPERS
from ...datasets import TrainPairDataset, build_dataloader
from ...runner import Runner
from ...utils.parallel import MMDataParallel
from ...utils import load_checkpoint
| 38.852273 | 99 | 0.623867 |
9e477dd3df7f5df09267317cd3bfe78b579ab14e | 212 | py | Python | coaster/views/__init__.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
] | 48 | 2015-01-15T08:57:24.000Z | 2022-01-26T04:04:34.000Z | coaster/views/__init__.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
] | 169 | 2015-01-16T13:17:38.000Z | 2021-05-31T13:23:23.000Z | coaster/views/__init__.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
] | 17 | 2015-02-15T07:39:04.000Z | 2021-10-05T11:20:22.000Z | """
View helpers
============
Coaster provides classes, functions and decorators for common scenarios in view
handlers.
"""
# flake8: noqa
from .classview import *
from .decorators import *
from .misc import *
| 16.307692 | 79 | 0.707547 |
9e481ccd75d0d45dc38668e3abc95311f9633891 | 1,429 | py | Python | socialdistribution/profiles/migrations/0009_auto_20200308_0539.py | um4r12/CMPUT404-project-socialdistribution | 54778371d1f6537370562de4ba4e4efe3288f95d | [
"Apache-2.0"
] | null | null | null | socialdistribution/profiles/migrations/0009_auto_20200308_0539.py | um4r12/CMPUT404-project-socialdistribution | 54778371d1f6537370562de4ba4e4efe3288f95d | [
"Apache-2.0"
] | null | null | null | socialdistribution/profiles/migrations/0009_auto_20200308_0539.py | um4r12/CMPUT404-project-socialdistribution | 54778371d1f6537370562de4ba4e4efe3288f95d | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.5 on 2020-03-08 05:39
from django.db import migrations, models
import django.db.models.deletion
| 42.029412 | 167 | 0.650805 |
9e491ac31491040fbc01015d8b5c1a03d71d8961 | 377 | py | Python | src/edeposit/amqp/rest/structures/__init__.py | edeposit/edeposit.rest | ecb1c00f7c156e1ed2000a0b68a3e4da506e7992 | [
"MIT"
] | 1 | 2015-12-10T13:30:22.000Z | 2015-12-10T13:30:22.000Z | src/edeposit/amqp/rest/structures/__init__.py | edeposit/edeposit.rest | ecb1c00f7c156e1ed2000a0b68a3e4da506e7992 | [
"MIT"
] | 33 | 2015-10-06T16:02:13.000Z | 2015-12-10T15:00:04.000Z | src/edeposit/amqp/rest/structures/__init__.py | edeposit/edeposit.rest | ecb1c00f7c156e1ed2000a0b68a3e4da506e7992 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from incomming import CacheTick
from incomming import SaveLogin
from incomming import RemoveLogin
from incomming import StatusUpdate
from outgoing import UploadRequest
from outgoing import AfterDBCleanupRequest
| 26.928571 | 79 | 0.649867 |
9e4940a9f3cc370e790b4e7a714aac9bb4e6baa7 | 9,446 | py | Python | accelbyte_py_sdk/api/platform/wrappers/_anonymization.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/platform/wrappers/_anonymization.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/platform/wrappers/_anonymization.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..operations.anonymization import AnonymizeCampaign
from ..operations.anonymization import AnonymizeEntitlement
from ..operations.anonymization import AnonymizeFulfillment
from ..operations.anonymization import AnonymizeIntegration
from ..operations.anonymization import AnonymizeOrder
from ..operations.anonymization import AnonymizePayment
from ..operations.anonymization import AnonymizeSubscription
from ..operations.anonymization import AnonymizeWallet
| 37.935743 | 151 | 0.72401 |
9e49cf2dc6f50772b3945f19de0ff48e7f6c2734 | 358 | py | Python | backend/api/serializers.py | vingle1/RestaurantProject | 5106a7662f26324ef50eebcfcba673960dff1734 | [
"MIT"
] | null | null | null | backend/api/serializers.py | vingle1/RestaurantProject | 5106a7662f26324ef50eebcfcba673960dff1734 | [
"MIT"
] | 1 | 2017-12-10T18:12:38.000Z | 2017-12-10T18:12:38.000Z | backend/api/serializers.py | vingle1/RestaurantProject | 5106a7662f26324ef50eebcfcba673960dff1734 | [
"MIT"
] | 2 | 2017-10-31T20:48:04.000Z | 2017-11-30T04:05:36.000Z |
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest_framework_json_api.relations import *
#load django and webapp models
#from django.contrib.auth.models import *
from api.models import *
| 22.375 | 51 | 0.765363 |
9e4d5fb0fa81e143693d4b850e79279a83dcb058 | 622 | py | Python | preprocessed_data/RGHS/Code/S_model.py | SaiKrishna1207/Underwater-Image-Segmentation | 78def27e577b10e6722c02807bdcfeb7ba53d760 | [
"MIT"
] | null | null | null | preprocessed_data/RGHS/Code/S_model.py | SaiKrishna1207/Underwater-Image-Segmentation | 78def27e577b10e6722c02807bdcfeb7ba53d760 | [
"MIT"
] | null | null | null | preprocessed_data/RGHS/Code/S_model.py | SaiKrishna1207/Underwater-Image-Segmentation | 78def27e577b10e6722c02807bdcfeb7ba53d760 | [
"MIT"
] | null | null | null | import numpy as np
import pylab as pl
x = [] # Make an array of x values
y = [] # Make an array of y values for each x value
for i in range(-128,127):
x.append(i)
for j in range(-128,127):
temp = j *(2**(1 - abs((j/128))))
y.append(temp)
# print('y',y)
# pl.xlim(-128, 127)# set axis limits
# pl.ylim(-128, 127)
pl.axis([-128, 127,-128, 127])
pl.title('S-model Curve Function ',fontsize=20)# give plot a title
pl.xlabel('Input Value',fontsize=20)# make axis labels
pl.ylabel('Output Value',fontsize=20)
pl.plot(x, y,color='red') # use pylab to plot x and y
pl.show() # show the plot on the screen | 23.037037 | 66 | 0.639871 |
9e4db1ef4c553d26b23cdf167ecc2ec7e965d780 | 36,578 | py | Python | tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py | PhoenixTales/gothic-devk | 48193bef8fd37626f8909853bfc5ad4b7126f176 | [
"FSFAP"
] | 3 | 2021-04-13T07:12:30.000Z | 2021-06-18T17:26:10.000Z | tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py | PhoenixTales/gothic-devk | 48193bef8fd37626f8909853bfc5ad4b7126f176 | [
"FSFAP"
] | null | null | null | tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py | PhoenixTales/gothic-devk | 48193bef8fd37626f8909853bfc5ad4b7126f176 | [
"FSFAP"
] | 2 | 2021-03-23T19:45:39.000Z | 2021-04-17T17:21:48.000Z | bl_info = {
"name": "Gothic Materials and Textures Blender",
"description": "Makes life easier for Gothic material export",
"author": "Diego",
"version": (1, 3, 0),
"blender": (2, 78, 0),
"location": "3D View > Tools",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Development"
}
import bpy
# if not blenders bundled python is used, packages might not be installed
try:
from mathutils import Color
except ImportError:
raise ImportError('Package mathutils needed, but not installed')
try:
import numpy
except ImportError:
raise ImportError('Package numpy needed, but not installed')
try:
import os.path
except ImportError:
raise ImportError('Package os needed, but not installed')
try:
import colorsys
except ImportError:
raise ImportError('Package colorsys needed, but not installed')
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
)
# ------------------------------------------------------------------------
# store properties in the active scene
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------
# hides all objects that do not have the material specified in the "searched_material" property
# optional: isolate in all layers
# changes the names of all used images to their filename
# if multiple images use the same file, only one is kept
# the others will be replaced by this one
# Removes suffixes like ".001" and renames textures to image filename
# replaces materials with same name except suffixes
# keeps only one texture per image file, replaces others by this one
# takes a sample of pixels and calculates average color for every material with image
# replaces all UV textures by the image that the material of this face has
# replaces materials by those that belong to the assigned UV textures
# at every call matlib.ini is parsed and for every image a matching material is searched_material
# depending on how often this texture is used by a material, the used material name is
# never: texture name without file extension
# once: take name from materialfilter
# more: ambiguous, depending on settings
# optionally faces with portal materials are not overwritten
# note that this will create a material for all used images in the file if they dont exist. this is done because
# it would be more troublesome to first filter out the actually needed materials
# ------------------------------------------------------------------------
# gothic tools in objectmode
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
bpy.utils.register_module(__name__)
bpy.types.Scene.gothic_tools = PointerProperty(type=GothicMaterialSettings)
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Scene.gothic_tools
if __name__ == "__main__":
register() | 43.963942 | 194 | 0.561458 |
9e4e27c4f397f2c0b09121050df5d040566af2dd | 7,881 | py | Python | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | [
"BSD-3-Clause"
] | 4 | 2017-11-14T03:33:57.000Z | 2021-06-05T16:35:40.000Z | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | [
"BSD-3-Clause"
] | 136 | 2017-11-06T16:02:58.000Z | 2021-11-11T18:20:23.000Z | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | [
"BSD-3-Clause"
] | 31 | 2017-11-06T19:55:35.000Z | 2020-12-15T13:53:53.000Z | # Massive Black 2 galaxy catalog class
import numpy as np
from astropy.table import Table
import astropy.units as u
import astropy.cosmology
from .GalaxyCatalogInterface import GalaxyCatalog
| 52.192053 | 134 | 0.53242 |
9e4e87db0add45d330be3d156367bbd52e0ded32 | 714 | py | Python | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | [
"MIT"
] | null | null | null | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | [
"MIT"
] | null | null | null | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.contrib.staticfiles.templatetags.staticfiles import static
| 42 | 96 | 0.564426 |
9e4e8b052d2746faabafff4026914e35d26807a7 | 532 | py | Python | src/objects/qubit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
] | 2 | 2019-10-28T20:26:14.000Z | 2019-10-29T08:28:45.000Z | src/objects/qubit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
] | 3 | 2019-10-28T09:19:27.000Z | 2019-10-28T13:42:08.000Z | src/objects/qubit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
] | null | null | null | from src.dirac_notation.bra import Bra
from src.dirac_notation.ket import Ket
from src.dirac_notation.matrix import Matrix
from src.dirac_notation import functions as dirac
from src.dirac_notation import constants as const
from src.objects.quantum_system import QuantumSystem, SystemType
| 29.555556 | 64 | 0.755639 |
9e4edf8dd4337b4a83cb6c425f974138a731fbae | 9,926 | py | Python | cuddlefish/apiparser.py | mozilla/FlightDeck | 61d66783252ac1318c990e342877a26c64f59062 | [
"BSD-3-Clause"
] | 6 | 2015-04-24T03:10:44.000Z | 2020-12-27T19:46:33.000Z | cuddlefish/apiparser.py | fox2mike/FlightDeck | 3a2fc78c13dd968041b349c4f9343e6c8b22dd25 | [
"BSD-3-Clause"
] | null | null | null | cuddlefish/apiparser.py | fox2mike/FlightDeck | 3a2fc78c13dd968041b349c4f9343e6c8b22dd25 | [
"BSD-3-Clause"
] | 5 | 2015-09-18T19:58:31.000Z | 2020-01-28T05:46:55.000Z | import sys, re, textwrap
def parse_hunks(text):
# return a list of tuples. Each is one of:
# ("raw", string) : non-API blocks
# ("api-json", dict) : API blocks
processed = 0 # we've handled all bytes up-to-but-not-including this offset
line_number = 1
for m in re.finditer("<api[\w\W]*?</api>", text, re.M):
start = m.start()
if start > processed+1:
hunk = text[processed:start]
yield ("markdown", hunk)
processed = start
line_number += hunk.count("\n")
api_text = m.group(0)
api_lines = api_text.splitlines()
d = APIParser().parse(api_lines, line_number)
yield ("api-json", d)
processed = m.end()
line_number += api_text.count("\n")
if processed < len(text):
yield ("markdown", text[processed:])
if __name__ == "__main__":
json = False
if sys.argv[1] == "--json":
json = True
del sys.argv[1]
docs_text = open(sys.argv[1]).read()
docs_parsed = list(parse_hunks(docs_text))
if json:
import simplejson
print simplejson.dumps(docs_parsed, indent=2)
else:
TestRenderer().render_docs(docs_parsed)
| 35.833935 | 89 | 0.503728 |
9e4f2abe49eca6572412ecb2672b250ab2b29afd | 1,217 | py | Python | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | [
"BSD-2-Clause-FreeBSD"
] | 61 | 2015-01-15T23:23:11.000Z | 2022-03-24T16:39:31.000Z | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | [
"BSD-2-Clause-FreeBSD"
] | 28 | 2015-01-26T22:32:24.000Z | 2022-01-13T01:11:56.000Z | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | [
"BSD-2-Clause-FreeBSD"
] | 25 | 2015-01-22T19:26:29.000Z | 2021-06-30T21:53:06.000Z | # -*- coding: utf-8 -*-
import attest
from acrylamid.core import cache
| 23.862745 | 56 | 0.532457 |
9e51608d7b0aa9e6ba5eb1fb96ffd50952b54f6c | 1,235 | py | Python | python/animate_sub_plots_sharc.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | [
"MIT"
] | null | null | null | python/animate_sub_plots_sharc.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | [
"MIT"
] | null | null | null | python/animate_sub_plots_sharc.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | [
"MIT"
] | null | null | null | from subplot_animation import subplot_animation
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
sys.path.append("/home/smp16fm/forked_amrvac/amrvac/tools/python")
from amrvac_pytools.datfiles.reading import amrvac_reader
from amrvac_pytools.vtkfiles import read, amrplot
program_name = sys.argv[0]
path2files = sys.argv[1:]
# Switches
refiner = '__'
fps = 3
start_frame = 0
in_extension = 'png'
out_extension = 'avi'
# set time to look over
time_start = 0
time_end = None
text_x_pos = 0.85
text_y_pos = 0.01
save_dir = '/shared/mhd_jet1/User/smp16fm/j/2D/results'
# make dirs
#path2files = "/shared/mhd_jet1/User/smp16fm/sj/2D/P300/B100/A20/"
# path2files = "../test/"
# dummy_name = 'solar_jet_con_'
dummy_name = ''
#read.load_vtkfile(0, file='/shared/mhd_jet1/User/smp16fm/sj/2D/P300/B100/A20/jet_t300_B100A_20_', type='vtu')
print(path2files[0])
test = subplot_animation(path2files[0], save_dir=save_dir, dummy_name='',
refiner=None, text_x_pos=0.85, text_y_pos=0.01,
time_start=0, time_end=time_end, start_frame=0, fps=fps,
in_extension='png', out_extension='avi')
| 27.444444 | 110 | 0.715789 |
9e554dd387e1b98981fc98073b0b6ac0775be949 | 812 | py | Python | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | [
"MIT"
] | null | null | null | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | [
"MIT"
] | null | null | null | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | [
"MIT"
] | null | null | null | from flask.helpers import flash
from flask.wrappers import Request
from swcf import app
from flask import render_template, redirect, request, url_for
from swcf.dao.indexDAO import * | 31.230769 | 61 | 0.674877 |
9e55fcc920876b41b0c966a7f0b020aafcb8f66f | 87 | py | Python | examples/testlib2/box/methods_a.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | 2 | 2021-07-01T14:33:58.000Z | 2022-03-19T19:19:09.000Z | examples/testlib2/box/methods_a.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | 15 | 2021-02-11T18:54:16.000Z | 2022-03-18T17:38:03.000Z | examples/testlib2/box/methods_a.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | 2 | 2021-06-17T18:56:02.000Z | 2022-03-08T05:02:17.000Z | from testlib2 import _puw
| 14.5 | 34 | 0.770115 |
9e5734bc9428d420f659a156adfa25e7ae27b0df | 4,668 | py | Python | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the show machine command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestShowMachine)
unittest.TextTestRunner(verbosity=2).run(suite)
| 45.320388 | 79 | 0.646744 |
9e5983beaa6b6cc08ac0ba87d128a18495efcf64 | 117 | py | Python | config-template.py | johanjordaan/silver-giggle | 5304a96b6aa1c4c5eb1f9069212423810aa89818 | [
"MIT"
] | 1 | 2021-12-04T05:11:26.000Z | 2021-12-04T05:11:26.000Z | config-template.py | johanjordaan/silver-giggle | 5304a96b6aa1c4c5eb1f9069212423810aa89818 | [
"MIT"
] | null | null | null | config-template.py | johanjordaan/silver-giggle | 5304a96b6aa1c4c5eb1f9069212423810aa89818 | [
"MIT"
] | null | null | null | host="mysql-general.cyqv8he15vrg.ap-southeast-2.rds.amazonaws.com"
user="admin"
password=""
database="silver_giggle"
| 23.4 | 66 | 0.794872 |