hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eebe3ee2689c486643e9c66684f0834e67a050c1 | 2,001 | py | Python | lib/gams/general_utils.py | zzzace2000/nodegam | 79c8675e65d75237f2e853ae55bbc40ae7124ee9 | [
"MIT"
] | 7 | 2021-11-06T14:26:07.000Z | 2022-03-17T10:27:17.000Z | lib/gams/general_utils.py | zzzace2000/node | 4501233177173ee9b246a5a5e462afd3b1d51bbb | [
"MIT"
] | 1 | 2022-03-22T01:08:27.000Z | 2022-03-22T17:19:50.000Z | lib/gams/general_utils.py | zzzace2000/node | 4501233177173ee9b246a5a5e462afd3b1d51bbb | [
"MIT"
] | 1 | 2021-11-06T14:27:05.000Z | 2021-11-06T14:27:05.000Z | import time, os
import numpy as np
import json
| 35.105263 | 111 | 0.592704 |
eebf77f5393d40a51825e9d1d10647b08c84de24 | 140 | py | Python | practica3/pregunta8.py | Vanesamorales/practica-N-3-python | e87d4662b5df208cfbc3a15db23d324f46ad838e | [
"Apache-2.0"
] | null | null | null | practica3/pregunta8.py | Vanesamorales/practica-N-3-python | e87d4662b5df208cfbc3a15db23d324f46ad838e | [
"Apache-2.0"
] | null | null | null | practica3/pregunta8.py | Vanesamorales/practica-N-3-python | e87d4662b5df208cfbc3a15db23d324f46ad838e | [
"Apache-2.0"
] | null | null | null | import carpeta8
# main block: load the list via the carpeta8 helpers, print it,
# sort it in place, then print it again to show the sorted order
# NOTE(review): cargar/imprimir/ordenar live in the project module `carpeta8` — not visible here
lista=carpeta8.cargar()
carpeta8.imprimir(lista)
carpeta8.ordenar(lista)
carpeta8.imprimir(lista)
eebfbf0ca3fc84c6b27f16b71cc79b9f09285376 | 692 | py | Python | core/clean.py | Saij84/mediaRename | 984fbe47dfa27b8e229934e5b29c73dd0ab48c05 | [
"MIT"
] | null | null | null | core/clean.py | Saij84/mediaRename | 984fbe47dfa27b8e229934e5b29c73dd0ab48c05 | [
"MIT"
] | null | null | null | core/clean.py | Saij84/mediaRename | 984fbe47dfa27b8e229934e5b29c73dd0ab48c05 | [
"MIT"
] | null | null | null | import re
from mediaRename.constants import constants as CONST
def cleanReplace(data):
    """Run every configured regex cleaning pass over the file entries in *data*.

    data: dict whose "files" key holds a list of per-file dicts; each dict's
    "newName" value is rewritten in place.  Non-dict entries are skipped.
    Returns nothing — mutation happens in place.
    """
    entries = data["files"]
    # each pass is a (regex, replacement) pair: the first three strip matches,
    # the final pass substitutes underscores
    passes = [(CONST.CLEAN_PASSONE, ""), (CONST.CLEAN_PASSTWO, ""),
              (CONST.CLEAN_PASSTHREE, ""), (CONST.CLEAN_REPLACE, "_")]
    for pattern, replacement in passes:
        matcher = re.compile(pattern, re.IGNORECASE)
        for entry in entries:
            if isinstance(entry, dict):
                entry["newName"] = matcher.sub(replacement, entry["newName"])
| 28.833333 | 77 | 0.619942 |
eec036acad92775b225df98eed2eda788c78e178 | 32,553 | py | Python | mindaffectBCI/decoder/utils.py | rohitvk1/pymindaffectBCI | 0348784d9b0fbd9d595e31ae46d2e74632399507 | [
"MIT"
] | 44 | 2020-02-07T15:01:47.000Z | 2022-03-21T14:36:15.000Z | mindaffectBCI/decoder/utils.py | CkiChen/pymindaffectBCI | 0119145a8b280c776f4c4e6cd776fed0f0156404 | [
"MIT"
] | 17 | 2020-02-07T17:11:23.000Z | 2022-02-20T18:01:42.000Z | mindaffectBCI/decoder/utils.py | CkiChen/pymindaffectBCI | 0119145a8b280c776f4c4e6cd776fed0f0156404 | [
"MIT"
] | 19 | 2020-02-07T17:13:22.000Z | 2022-03-17T01:22:35.000Z | # Copyright (c) 2019 MindAffect B.V.
# Author: Jason Farquhar <jason@mindaffect.nl>
# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.
#
# pymindaffectBCI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pymindaffectBCI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>
import numpy as np
# time-series tests
def window_axis(a, winsz, axis=0, step=1, prependwindowdim=False):
    """Return a zero-copy sliding-window view of `a` along `axis`.

    Args:
        a (np.ndarray): input array.
        winsz (int): window length in samples.
        axis (int): axis to slide along (negative values allowed).
        step (int): hop between successive windows.
        prependwindowdim (bool): if True the window dimension comes *before*
            the number-of-windows dimension; otherwise after.

    Returns:
        np.ndarray: strided view — no data is copied, so writes alias `a`.
    """
    if axis < 0:  # normalize to a non-negative axis index
        axis = a.ndim + axis
    # number of complete windows that fit (matches the original truncation)
    nwin = int((a.shape[axis] - winsz) / step) + 1
    ax_stride = a.strides[axis]
    if prependwindowdim:
        mid_shape = (winsz, nwin)
        mid_strides = (ax_stride, ax_stride * step)
    else:
        mid_shape = (nwin, winsz)
        mid_strides = (ax_stride * step, ax_stride)
    shape = a.shape[:axis] + mid_shape + a.shape[axis + 1:]
    strides = a.strides[:axis] + mid_strides + a.strides[axis + 1:]
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def equals_subarray(a, pat, axis=-1, match=-1):
    """Flag positions along `axis` of `a` where the 1-d pattern `pat` occurs.

    Args:
        a (np.ndarray): array to search.
        pat (array-like): 1-d pattern to match.
        axis (int): axis of `a` to scan along.
        match (int): -1 marks the *end* of each occurrence (pad before),
            anything else marks the *start* (pad after).

    Returns:
        np.ndarray(bool): same shape as `a`, True at match positions.
    """
    if axis < 0:  # normalize negative axis
        axis = a.ndim + axis
    if not isinstance(pat, np.ndarray):
        pat = np.array(pat)
    # pattern reshaped to broadcast against the windowed view of `a`
    bshape = np.ones(a.ndim + 1, dtype=int)
    bshape[axis + 1] = pat.size
    bpat = np.array(pat.ravel(), dtype=a.dtype).reshape(bshape)
    # slide pattern-length windows over `a` and test each one
    windows = window_axis(a, pat.size, axis=axis, step=1)
    hits = np.all(np.equal(windows, bpat), axis=axis + 1)
    # pad back to the input length on the appropriate side
    pad_shape = list(a.shape)
    pad_shape[axis] = a.shape[axis] - hits.shape[axis]
    pad = np.zeros(pad_shape, dtype=hits.dtype)
    if match == -1:
        return np.append(pad, hits, axis)
    return np.append(hits, pad, axis)
def extract_ringbuffer_segment(rb, bgn_ts, end_ts=None):
    """Copy out the samples between two time-stamps from a ring-buffer.

    The buffer's last channel is assumed to hold per-sample time-stamps.

    Args:
        rb: ring-buffer object exposing `unwrap()` -> (nsamp, nch+1) array.
        bgn_ts: time-stamp of the segment start.
        end_ts: time-stamp of the segment end; None means "until now".

    Returns:
        np.ndarray: copied (nsamp', nch+1) slice of the buffer.
    """
    data = rb.unwrap()                      # (nsamp, nch+1)
    stamps = data[:, -1]                    # time-stamps ride in the final channel
    # bracketing search: index i matches when stamps[i] < bgn_ts <= stamps[i+1]
    hits = np.flatnonzero((stamps[:-1] < bgn_ts) & (bgn_ts <= stamps[1:]))
    if hits.size > 0:
        start = hits[0]
    else:
        # before the buffer start -> 0; after the buffer end -> empty slice
        start = 0 if bgn_ts <= stamps[0] else len(stamps) + 1
    if end_ts is None:                      # open-ended: take everything
        stop = len(stamps)
    else:
        hits = np.flatnonzero((stamps[:-1] < end_ts) & (end_ts <= stamps[1:]))
        stop = hits[-1] if hits.size > 0 else len(stamps)
    # copy so later ring-buffer writes cannot corrupt the returned segment
    return data[start:stop + 1, :].copy()
def unwrap(x, range=None):
    """Undo wrap-around in a counter stored with limited bit resolution
    (e.g. time-stamps held in 24-bit integers).

    Args:
        x: 1-d array-like of (possibly wrapped) monotonically increasing values.
            Previously only np.ndarray worked; any sequence is now accepted.
        range (float, optional): the counter modulus.  When None it is
            estimated as the smallest power of two >= max(x).
            N.B. the name shadows the builtin; kept for API compatibility.

    Returns:
        np.ndarray: copy of x with the accumulated wrap offsets added back.
    """
    # BUGFIX: convert to ndarray so list/tuple inputs work (x.shape was used below)
    x = np.asarray(x, dtype=float)
    if range is None:
        range = 1 << int(np.ceil(np.log2(np.max(x))))
    # a jump more negative than half the modulus marks a wrap-around
    wrap_ind = np.diff(x) < -range / 2
    offset = np.zeros(x.shape)
    offset[np.flatnonzero(wrap_ind) + 1] = range   # each wrap adds one full range
    offset = np.cumsum(offset)                     # wraps accumulate over time
    return x + offset
def search_directories_for_file(f, *args):
    """search a given set of directories for given filename, return 1st match

    Args:
        f (str): filename (or glob pattern) to search for; `~` is expanded.
        *args: directory names to look in, tried in order.

    Returns:
        str: the *first* full path where f is found, or f unchanged if not found.
    """
    import os
    import glob
    f = os.path.expanduser(f)
    # found as given? (either a literal path or a glob pattern with matches)
    if os.path.exists(f) or glob.glob(f):
        return f
    for directory in args:
        candidate = os.path.join(directory, f)
        if os.path.exists(candidate) or glob.glob(candidate):
            return candidate
    # nothing matched — hand back the (expanded) name unchanged
    return f
# toy data generation
#@function
def sliceY(Y, stimTimes_samp, featdim=True):
    """Pick out the stimulus-event samples from an up-sampled stimulus array.

    Args:
        Y (np.ndarray): (nTrl, nSamp, nY, nE) if featdim else (nTrl, nSamp, nY);
            the leading trial dimension may be absent.
        stimTimes_samp: sample indices of the stimulus events.
        featdim (bool): whether Y carries a trailing event-feature dimension.

    Returns:
        np.ndarray: Y restricted to the given sample indices.
    """
    idx = np.array(stimTimes_samp, dtype=int)
    if featdim:
        # with a feature dim the sample axis is 1 (or 0 when no trial dim)
        return Y[:, idx, :, :] if Y.ndim > 3 else Y[idx, :, :]
    # no feature dimension
    return Y[:, idx, :] if Y.ndim > 2 else Y[idx, :]
def block_randomize(true_target, npermute, axis=-3, block_size=None):
    '''Make `npermute` block-wise random permutations of the input array.

    Each destination block of samples is filled from a randomly chosen
    (different) source block and a randomly chosen output `yi` — used to
    build surrogate/permuted stimulus sequences.

    Inputs:
      true_target: (..., nEp, nY, e): true target value for nTrl trials of length nEp flashes
      npermute: int - number of permutations to make
      axis : int the axis along which to permute true_target
      block_size : approximate samples per permutation block; default
          shape[axis]/2/npermute (at least 1)
    '''
    if true_target.ndim < 3:
        raise ValueError("true target info must be at least 3d")
    # NOTE(review): -3 and ndim-2 denote different axes when ndim > 3 — confirm intended
    if not (axis == -3 or axis == true_target.ndim-2):
        raise NotImplementedError("Only implementated for axis=-2 currently")
    # estimate the number of blocks to use
    if block_size is None:
        block_size = max(1, true_target.shape[axis]/2/npermute)
    nblk = int(np.ceil(true_target.shape[axis]/block_size))
    blk_lims = np.linspace(0, true_target.shape[axis], nblk, dtype=int)
    # convert to (start, end) index pairs for each block
    blk_lims = [(blk_lims[i], blk_lims[i+1]) for i in range(len(blk_lims)-1)]
    # output: same leading shape, but npermute "outputs" in place of nY
    cb = np.zeros(true_target.shape[:axis+1] + (npermute, true_target.shape[-1]))
    for ti in range(cb.shape[axis+1]):  # loop over the permutations
        for di, dest_blk in enumerate(blk_lims):
            # random source output and source block (non-deterministic: uses np.random)
            yi = np.random.randint(true_target.shape[axis+1])
            si = np.random.randint(len(blk_lims))
            # ensure can't be the same block
            if si == di:
                si = si+1 if si < len(blk_lims)-1 else si-1
            src_blk = blk_lims[si]
            # guard for different lengths for source/dest blocks
            dest_len = dest_blk[1] - dest_blk[0]
            if dest_len > src_blk[1]-src_blk[0]:
                if src_blk[0]+dest_len < true_target.shape[axis]:
                    # enlarge the src
                    src_blk = (src_blk[0], src_blk[0]+dest_len)
                elif src_blk[1]-dest_len > 0:
                    # otherwise shift the src window back to get enough samples
                    src_blk = (src_blk[1]-dest_len, src_blk[1])
                else:
                    raise ValueError("can't fit source and dest")
            elif dest_len < src_blk[1]-src_blk[0]:
                # truncate an over-long source block
                src_blk = (src_blk[0], src_blk[0]+dest_len)
            # copy the (resized) source block into the destination slot
            cb[..., dest_blk[0]:dest_blk[1], ti, :] = true_target[..., src_blk[0]:src_blk[1], yi, :]
    return cb
def upsample_codebook(trlen, cb, ep_idx, stim_dur_samp, offset_samp=(0, 0)):
    ''' upsample a codebook definition to sample rate
    Inputs:
      trlen : (int) length after up-sampling
      cb : (nTr, nEp, ...) the codebook
      ep_idx : (nTr, nEp) the sample indices of the codebook entries, or None
          to place a single entry at the start of every trial
      stim_dur_samp: (int) the number of samples each cb entry is held for
      offset_samp : (2,):int the offset for the stimulus in the upsampled trlen data
    Outputs:
      Y : (nTrl, trlen, ...) the up-sampled codebook '''
    if ep_idx is not None:
        if not np.all(cb.shape[:ep_idx.ndim] == ep_idx.shape):
            raise ValueError("codebook and epoch indices must has same shape")
        trl_idx = ep_idx[:, 0] # start each trial
    else: # make dummy ep_idx with 0 for every trial!
        ep_idx = np.zeros((cb.shape[0],1),dtype=int)
        # NOTE(review): trl_idx is (nTr,1) 2-d here (vs 1-d above); the int()
        # below tolerates size-1 arrays, but confirm this branch is intended
        trl_idx = ep_idx
    Y = np.zeros((cb.shape[0], trlen)+ cb.shape[2:], dtype='float32') # (nTr, nSamp, ...)
    for ti, trl_start_idx in enumerate(trl_idx):
        for ei, epidx in enumerate(ep_idx[ti, :]):
            if ei > 0 and epidx == 0: # zero indicates end of variable length trials
                break
            # start index for this epoch in this *trial*, including the 0-offset
            ep_start_idx = -int(offset_samp[0])+int(epidx-trl_start_idx)
            # hold the codebook value for stim_dur_samp samples
            Y[ti, ep_start_idx:(ep_start_idx+int(stim_dur_samp)), ...] = cb[ti, ei, ...]
    return Y
def lab2ind(lab, lab2class=None):
    """Convert a vector of class labels into a one-hot indicator matrix.

    Args:
        lab: (N,) label per sample.
        lab2class (list-of-tuples, optional): class groupings; each tuple lists
            the labels mapped onto that output column.  When None, one class
            per unique label is created in sorted label order.

    Returns:
        (Y, lab2class): Y is an (N, nClass) boolean indicator matrix, and
        lab2class the (possibly auto-generated) class groupings.
    """
    if lab2class is None:
        # FIX: sort the unique labels so the class ordering is deterministic
        # (bare set() iteration order is not stable across runs for str labels)
        lab2class = [(l,) for l in sorted(set(lab))]  # N.B. list of 1-tuples
    if not isinstance(lab, np.ndarray):
        lab = np.array(lab)
    Y = np.zeros(lab.shape + (len(lab2class),), dtype=bool)
    for li, ls in enumerate(lab2class):
        for l in ls:  # a column is True for *any* label in its group
            Y[lab == l, li] = True
    return (Y, lab2class)
def zero_outliers(X, Y, badEpThresh=4, badEpChThresh=None, verbosity=0):
    '''identify and zero-out bad/outlying data
    Inputs:
      X = (nTrl, nSamp, d)
      Y = (nTrl, nSamp, nY, nE) OR (nTrl, nSamp, nE)
          nE=#event-types nY=#possible-outputs nEpoch=#stimulus events to process
      badEpThresh : std-dev threshold for whole-epoch outliers (0 disables)
      badEpChThresh : std-dev threshold for per-epoch-channel outliers;
          defaults to 2*badEpThresh
    Returns:
      (X, Y) with outlying entries zeroed (copies when anything changed)
    '''
    # remove whole bad epochs first
    if badEpThresh > 0:
        bad_ep, _ = idOutliers(X, badEpThresh, axis=(-2, -1)) # ave over time,ch
        if np.any(bad_ep):
            if verbosity > 0:
                print("{} badEp".format(np.sum(bad_ep.ravel())))
            # copy X,Y so don't modify in place!
            X = X.copy()
            Y = Y.copy()
            X[bad_ep[..., 0, 0], ...] = 0
            #print("Y={}, Ybad={}".format(Y.shape, Y[bad_ep[..., 0, 0], ...].shape))
            # zero out Y also, so don't try to 'fit' the bad zeroed data
            Y[bad_ep[..., 0, 0], ...] = 0
    # Remove bad individual channels next
    if badEpChThresh is None: badEpChThresh = badEpThresh*2
    if badEpChThresh > 0:
        bad_epch, _ = idOutliers(X, badEpChThresh, axis=-2) # ave over time
        if np.any(bad_epch):
            if verbosity > 0:
                print("{} badEpCh".format(np.sum(bad_epch.ravel())))
            # make index expression to zero out the bad entries
            badidx = list(np.nonzero(bad_epch)) # convert to linear indices
            badidx[-2] = slice(X.shape[-2]) # broadcast over the accumulated dimensions
            # NOTE(review): `bad_ep` is unbound when badEpThresh <= 0 — this line
            # would raise NameError in that configuration; confirm callers
            if not np.any(bad_ep): # copy so don't update in place
                X = X.copy()
            X[tuple(badidx)] = 0
    return (X, Y)
def idOutliers(X, thresh=4, axis=-2, verbosity=0):
    ''' identify outliers with excessively high power in the input data
    Inputs:
      X:float the data to identify outliers in
      axis:int (-2) axis (or axes) of X summed over to get power
      thresh(float): threshold in standard deviations for outlier detection
      verbosity(int): verbosity level
    Returns:
      badEp:bool (X.shape with `axis` reduced to 1) indicator for outlying elements
      epPower:float (same shape) the power used to identify bad entries
    '''
    # RMS power with the reduced axes kept as singletons
    power = np.sqrt((X ** 2).sum(axis=axis, keepdims=True))
    keep = np.ones(power.shape, dtype=bool)
    # iteratively re-estimate mean/std over the surviving entries only,
    # so extreme outliers don't inflate the threshold
    for _ in range(4):
        center = power[keep].mean()
        spread = np.sqrt(((power[keep] - center) ** 2).mean())
        keep[power > center + thresh * spread] = False
    keep = keep.reshape(power.shape)
    outliers = ~keep
    if verbosity > 1:
        print("%d bad" % (np.sum(outliers.ravel())))
    return (outliers, power)
def robust_mean(X, thresh=(3, 3)):
    """Compute a robust mean of X by iteratively discarding gaussian outliers.

    Args:
        X (np.ndarray): the data.
        thresh (2-tuple): (upper, lower) outlier cut-offs in standard
            deviations; either entry may be None to disable that side.

    Returns:
        (mu, good): the robust mean, and a boolean mask of the retained samples.
    """
    good = np.ones(X.shape, dtype=bool)
    for _ in range(4):
        mu = np.mean(X[good])
        sigma = np.sqrt(np.mean((X[good] - mu) ** 2))
        # re-compute the outlier list from scratch each iteration
        good[:] = True
        if thresh[0] is not None:
            badThresh = mu + thresh[0] * sigma
            good[X > badThresh] = False
        if thresh[1] is not None:
            # BUGFIX: the lower cut-off previously used thresh[0], which also
            # crashed with TypeError for thresh=(None, k)
            badThresh = mu - thresh[1] * sigma
            good[X < badThresh] = False
    mu = np.mean(X[good])
    return (mu, good)
try:
from scipy.signal import butter, bessel, sosfilt, sosfilt_zi
except:
#if True:
# use the pure-python fallbacks
def sosfilt_zi_warmup(zi, X, axis=-1, sos=None):
    '''Use some initial data to "warmup" a second-order-sections filter to reduce startup artifacts.
    Args:
      zi (np.ndarray): the sos filter state, shape (n_sections, 2)
      X ([type]): the warmup data
      axis (int, optional): The filter axis in X. Defaults to -1.
      sos ([type], optional): the sos filter coefficients. Defaults to None.
    Returns:
      [np.ndarray]: the warmed up filter state, broadcastable against X
    '''
    if axis < 0:  # no neg axis
        axis = X.ndim+axis
    # zi => (order,...,2,...): insert singleton dims so zi broadcasts over X
    zi = np.reshape(zi, (zi.shape[0],) + (1,)*(axis) + (zi.shape[1],) + (1,)*(X.ndim-axis-1))
    # make a programattic index expression to support arbitary axis
    idx = [slice(None)]*X.ndim
    # get the index to start the warmup (~3 samples per sos coefficient)
    warmupidx = 0 if sos is None else min(sos.size*3,X.shape[axis]-1)
    # center on 1st warmup value (scale the steady-state zi by that sample)
    idx[axis] = slice(warmupidx,warmupidx+1)
    zi = zi * X[tuple(idx)]
    # run the filter backwards over the rest of the warmup values
    # NOTE(review): `sosfilt` must be in scope here — in the pure-python
    # fallback branch only sosfilt_2d_py is defined; confirm the binding
    if not sos is None and warmupidx>3:
        idx[axis] = slice(warmupidx,1,-1)
        _, zi = sosfilt(sos, X[tuple(idx)], axis=axis, zi=zi)
    return zi
def iir_sosfilt_sos(stopband, fs, order=4, ftype='butter', passband=None, verb=0):
    ''' given a set of filter cutoffs return butterworth or bessel sos coefficients

    stopband : (lo, hi[, btype]) or a sequence of such tuples; negative
        frequencies count back from the nyquist rate.  `passband` is unused.
    fs : sample rate in Hz used to normalize the cut-offs.
    Returns: (n_sections, 6) sos cascade concatenated over all bands.
    '''
    # convert to normalized frequency, Note: not to close to 0/1
    if stopband is None:
        return np.array(())
    if not hasattr(stopband[0],'__iter__'):
        stopband=(stopband,)  # promote a single band spec to a 1-tuple of bands
    sos=[]
    for sb in stopband:
        btype = None
        if type(sb[-1]) is str:
            btype = sb[-1]   # explicit band type given as trailing string
            sb = sb[:-1]
        # convert to normalize frequency
        sb = np.array(sb,dtype=np.float32)
        sb[sb<0] = (fs/2)+sb[sb<0]+1 # neg freq count back from nyquist
        Wn = sb/(fs/2)
        if Wn[1] < .0001 or .9999 < Wn[0]: # no filter
            continue
        # identify type from frequencies used, cliping if end of frequency range
        if Wn[0] < .0001:
            Wn = Wn[1]
            btype = 'highpass' if btype is None or btype == 'bandstop' else 'lowpass'
        elif .9999 < Wn[1]:
            Wn = Wn[0]
            btype = 'lowpass' if btype is None or btype == 'bandstop' else 'highpass'
        elif btype is None: # .001 < Wn[0] and Wn[1] < .999:
            btype = 'bandstop'
        if verb>0: print("{}={}={}".format(btype,sb,Wn))
        # NOTE(review): butter/bessel come from scipy.signal — in the
        # pure-python fallback branch they are unresolved; confirm
        if ftype == 'butter':
            sosi = butter(order, Wn, btype=btype, output='sos')
        elif ftype == 'bessel':
            sosi = bessel(order, Wn, btype=btype, output='sos', norm='phase')
        else:
            raise ValueError("Unrecognised filter type")
        sos.append(sosi)
    # single big filter cascade
    # NOTE(review): raises if every band was skipped (empty list concatenate)
    sos = np.concatenate(sos,axis=0)
    return sos
def save_butter_sosfilt_coeff(filename=None, stopband=((45,65),(5.5,25,'bandpass')), fs=200, order=6, ftype='butter'):
    ''' design a butterworth sos filter cascade and save the coefficients

    Pickles two objects into `filename`: first the (n_sections, 6) sos
    coefficients, then the steady-state initial conditions zi.
    filename defaults to an auto-generated descriptive name.
    '''
    import pickle
    sos = iir_sosfilt_sos(stopband, fs, order, passband=None, ftype=ftype)
    # NOTE(review): `sosfilt_zi` is scipy.signal's — the fallback defines
    # sosfilt_zi_py instead, so this may be unresolved without scipy; confirm
    zi = sosfilt_zi(sos)
    if filename is None:
        # auto-generate descriptive filename
        filename = "{}_stopband{}_fs{}.pk".format(ftype,stopband,fs)
    print("Saving to: {}\n".format(filename))
    with open(filename,'wb') as f:
        pickle.dump(sos,f)
        pickle.dump(zi,f)
        f.close()  # redundant inside `with`, but harmless
# TODO[] : cythonize?
# TODO[X] : vectorize over d? ---- NO. 2.5x *slower*
def sosfilt_2d_py(sos,X,axis=-2,zi=None):
    ''' pure python fallback for second-order-sections filter in case scipy isn't available

    Filters X *in place* along the time axis (dim 0/-2) using a direct-form-II
    transposed cascade.  Returns (X, zi) when zi was supplied, else just X —
    mirroring scipy.signal.sosfilt's convention.
    '''
    X = np.asarray(X)
    sos = np.asarray(sos)
    if zi is None:
        returnzi = False
        zi = np.zeros((sos.shape[0],2,X.shape[-1]),dtype=X.dtype)
    else:
        returnzi = True
        zi = np.asarray(zi)
    Xshape = X.shape
    if not X.ndim == 2:
        print("Warning: X>2d.... treating as 2d...")
        X = X.reshape((-1,Xshape[-1]))
    if axis < 0:
        axis = X.ndim + axis
    if not axis == X.ndim-2:
        raise ValueError("Only for time in dim 0/-2")
    if sos.ndim != 2 or sos.shape[1] != 6:
        raise ValueError('sos must be shape (n_sections, 6)')
    if zi.ndim != 3 or zi.shape[1] != 2 or zi.shape[2] != X.shape[1]:
        raise ValueError('zi must be shape (n_sections, 2, dim)')
    # pre-normalize sos if needed (so a0 == 1 for every section)
    for j in range(sos.shape[0]):
        if sos[j,3] != 1.0:
            sos[j,:] = sos[j,:]/sos[j,3]
    n_signals = X.shape[1]
    n_samples = X.shape[0]
    n_sections = sos.shape[0]
    # extract the a/b (feed-forward / feed-back) coefficients
    b = sos[:,:3]
    a = sos[:,4:]
    # loop over outputs; per-sample cascade through all sections
    x_n = 0
    for i in range(n_signals):
        for n in range(n_samples):
            for s in range(n_sections):
                x_n = X[n, i]
                # use direct II transposed structure
                X[n, i] = b[s, 0] * x_n + zi[s, 0, i]
                zi[s, 0, i] = b[s, 1] * x_n - a[s, 0] * X[n, i] + zi[s, 1, i]
                zi[s, 1, i] = b[s, 2] * x_n - a[s, 1] * X[n, i]
    # back to input shape
    if not len(Xshape) == 2:
        X = X.reshape(Xshape)
    # match sosfilt, only return zi if given zi
    if returnzi :
        return X, zi
    else:
        return X
def sosfilt_zi_py(sos):
    '''Compute steady-state initial conditions for a second-order-sections
    filter — pure-python fallback for scipy.signal.sosfilt_zi.

    sos : (n_sections, 6) array of [b0,b1,b2,a0,a1,a2] coefficients.
    Returns: (n_sections, 2) initial state zi such that a unit-step input
    produces a unit-step output from the first sample.
    '''
    sos = np.asarray(sos)
    if sos.ndim != 2 or sos.shape[1] != 6:
        raise ValueError('sos must be shape (n_sections, 6)')
    n_sections = sos.shape[0]
    zi = np.empty((n_sections, 2))
    scale = 1.0  # running DC gain of the preceding sections
    for section in range(n_sections):
        b = sos[section, :3]
        a = sos[section, 3:]
        if a[0] != 1.0:
            # Normalize the coefficients so a[0] == 1.
            b = b / a[0]
            a = a / a[0]
        # BUGFIX: np.linalg.companion does not exist (it is scipy.linalg's),
        # and the identity must be 2x2 (len(a)-1), not (n_sections-1).
        # Build the 2x2 companion matrix of the normalized denominator directly.
        companion = np.array([[-a[1], -a[2]],
                              [1.0, 0.0]])
        IminusA = np.eye(2) - companion.T
        B = b[1:] - a[1:] * b[0]
        # Solve zi = A*zi + B for the per-section steady state
        zi[section] = scale * np.linalg.solve(IminusA, B)
        scale *= b.sum() / a.sum()
    return zi
# def butter_py(order,fc,fs,btype,output):
# ''' pure python butterworth filter synthesis '''
# if fc>=fs/2:
# error('fc must be less than fs/2')
# # I. Find poles of analog filter
# k= np.arange(order)
# theta= (2*k -1)*np.pi/(2*order);
# pa= -sin(theta) + j*cos(theta); # poles of filter with cutoff = 1 rad/s
# #
# # II. scale poles in frequency
# Fc= fs/np.pi * tan(np.pi*fc/fs); # continuous pre-warped frequency
# pa= pa*2*np.pi*Fc; # scale poles by 2*pi*Fc
# #
# # III. Find coeffs of digital filter
# # poles and zeros in the z plane
# p= (1 + pa/(2*fs))/(1 - pa/(2*fs)) # poles by bilinear transform
# q= -np.ones((1,N)); # zeros
# #
# # convert poles and zeros to polynomial coeffs
# a= poly(p); # convert poles to polynomial coeffs a
# a= real(a);
# b= poly(q); # convert zeros to polynomial coeffs b
# K= sum(a)/sum(b); # amplitude scale factor
# b= K*b;
if __name__=='__main__':
    # smoke-run: design and pickle a default butterworth sos filter cascade
    save_butter_sosfilt_coeff("sos_filter_coeff.pk")
    #test_butter_sosfilt()
| 39.458182 | 183 | 0.595183 |
eec107c75238eeb480e6c150f395182753824077 | 155 | py | Python | Tasks/task_7.py | madhubmvs/python-self-teaching | adce7a18553fc13a96d0319fdeb5ce9894ec74fc | [
"MIT"
] | null | null | null | Tasks/task_7.py | madhubmvs/python-self-teaching | adce7a18553fc13a96d0319fdeb5ce9894ec74fc | [
"MIT"
] | null | null | null | Tasks/task_7.py | madhubmvs/python-self-teaching | adce7a18553fc13a96d0319fdeb5ce9894ec74fc | [
"MIT"
] | null | null | null | a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# collect the elements of `a` that also occur in `b`
# (order and duplicates from `a` are preserved)
c = [x for x in a if x in b]
print(c) | 17.222222 | 47 | 0.406452 |
eec118b9402f1ab3d9a333bb53d8180c1858ff75 | 2,100 | py | Python | model/test.py | yacoubb/lang-classifier | d39a342cf8ad64b191ea235f9af3f833033f254a | [
"MIT"
] | 1 | 2019-07-03T11:28:55.000Z | 2019-07-03T11:28:55.000Z | model/test.py | yacoubb/lang-classifier | d39a342cf8ad64b191ea235f9af3f833033f254a | [
"MIT"
] | null | null | null | model/test.py | yacoubb/lang-classifier | d39a342cf8ad64b191ea235f9af3f833033f254a | [
"MIT"
] | null | null | null | from tensorflow import keras
import os
import numpy as np
import sys
import json
sys.path.append("/".join(os.path.abspath(__file__).split("/")[:-2]))
from model.dataset import utils, test_sampler
# Load the saved RMS model, score it, and archive the summary plus every
# per-word prediction to a text report next to the model.
# NOTE(review): `estimate_model_accuracy` is not defined in this chunk —
# presumably provided by the dataset utils imported above; confirm.
summary, all_predictions = estimate_model_accuracy(
    keras.models.load_model("./RMS_model/model.h5")
)
print(summary)
with open("./RMS_model/testing.txt", "w+") as test_file:
    test_file.write(summary)
    # visual separator between the summary and the per-word listing
    test_file.write("=" * 20 + "\n")
    for word, pred in all_predictions:
        test_file.write(word + ", " + pred + "\n")
eec16f1e4b653abf2db741d973b4bf4d50090976 | 927 | py | Python | codes/Layer/Layer.py | serenaklm/rumor_detection | 8f4822951db111cc2e21f9a2901872c9681a2cbb | [
"MIT"
] | 42 | 2020-03-24T03:09:19.000Z | 2022-02-15T14:13:13.000Z | codes/Layer/Layer.py | serenaklm/rumor_detection | 8f4822951db111cc2e21f9a2901872c9681a2cbb | [
"MIT"
] | 3 | 2020-08-18T13:15:20.000Z | 2021-06-15T12:17:08.000Z | codes/Layer/Layer.py | serenaklm/rumor_detection | 8f4822951db111cc2e21f9a2901872c9681a2cbb | [
"MIT"
] | 15 | 2020-03-22T23:48:02.000Z | 2022-03-14T23:53:42.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import numpy as np
from Layer import FeedForwardNetwork
from Layer import MultiHeadAttention
__author__ = "Serena Khoo" | 28.96875 | 167 | 0.785329 |
eec22817edf6f5ff4caafda2c75d1273cb9edbb8 | 2,102 | py | Python | crawler/crawler2.py | labcontext/image-inpainting-oldpaper | da4683a2c58d662e443ea24ab93fd9d8fcb96bda | [
"Apache-2.0"
] | null | null | null | crawler/crawler2.py | labcontext/image-inpainting-oldpaper | da4683a2c58d662e443ea24ab93fd9d8fcb96bda | [
"Apache-2.0"
] | 3 | 2021-03-19T11:16:57.000Z | 2022-01-13T02:18:17.000Z | crawler/crawler2.py | labcontext/image-inpainting-oldpaper | da4683a2c58d662e443ea24ab93fd9d8fcb96bda | [
"Apache-2.0"
] | null | null | null | import requests
import urllib.request
import os
import pickle
import argparse
# file read folder
# URL template for the ITKC image database; placeholders {0}..{5} are filled
# from the label/middle/last/num settings below
path = 'http://db.itkc.or.kr//data/imagedb/BOOK/ITKC_{0}/ITKC_{0}_{1}A/ITKC_{0}_{1}A_{2}{5}_{3}{4}.JPG'
# Manual — crawl ranges configured by hand
label = ['BT', 'MO']
middle = 1400
last = ['A', 'V'] # A ~400 V ~009
num = 10
num1 = 400
fin = ['A', 'B', 'H', 'L']
# file path, save path
# pad for number
# NOTE(review): main() is not defined in this chunk — presumably stripped
# from this excerpt; confirm before running.
if __name__ == '__main__':
    main()
| 26.275 | 103 | 0.460038 |
eec2dfa96c82d004b2ff333de47a8fe7f395770a | 2,646 | py | Python | src/spade/symbols/symbol.py | ArvinSKushwaha/SPADE | b9a0f7698606a698fbc5a44e3dd36cb40186bda3 | [
"MIT"
] | null | null | null | src/spade/symbols/symbol.py | ArvinSKushwaha/SPADE | b9a0f7698606a698fbc5a44e3dd36cb40186bda3 | [
"MIT"
] | null | null | null | src/spade/symbols/symbol.py | ArvinSKushwaha/SPADE | b9a0f7698606a698fbc5a44e3dd36cb40186bda3 | [
"MIT"
] | null | null | null | """This module holds the Symbol, ComputationalGraph, and ComputationalGraphNode classes and methods to help construct
a computational graph."""
from typing import Optional
from .operators import Add, Subtract, Multiply, Divide, Grad, Div, Curl, Laplacian
| 38.347826 | 117 | 0.712018 |
eec6e813387d5c509fe53af51947031d9b165546 | 2,218 | py | Python | test-runner/measurement.py | brycewang-microsoft/iot-sdks-e2e-fx | 211c9c2615a82076bda02a27152d67366755edbf | [
"MIT"
] | 12 | 2019-02-02T00:15:13.000Z | 2022-02-08T18:20:08.000Z | test-runner/measurement.py | brycewang-microsoft/iot-sdks-e2e-fx | 211c9c2615a82076bda02a27152d67366755edbf | [
"MIT"
] | 36 | 2019-02-14T22:53:17.000Z | 2022-03-22T22:41:38.000Z | test-runner/measurement.py | brycewang-microsoft/iot-sdks-e2e-fx | 211c9c2615a82076bda02a27152d67366755edbf | [
"MIT"
] | 12 | 2019-02-19T13:28:25.000Z | 2022-02-08T18:20:55.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import datetime
import threading
import contextlib
| 22.632653 | 82 | 0.596934 |
eeca3c40e6643d64e2cc7861e9484fa8ec9bd6f8 | 9,415 | py | Python | main.py | Arnav-Ghatti/Tkinter-Money-Tracker | 365dcafc78522d03062a8f062fa8167b9c015583 | [
"MIT"
] | null | null | null | main.py | Arnav-Ghatti/Tkinter-Money-Tracker | 365dcafc78522d03062a8f062fa8167b9c015583 | [
"MIT"
] | null | null | null | main.py | Arnav-Ghatti/Tkinter-Money-Tracker | 365dcafc78522d03062a8f062fa8167b9c015583 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import messagebox
import json
# Constants — UI theme shared by all widgets below
FONT_NAME = "Open Sans"   # font family used throughout the UI
BG_COLOR = "#f9f7f7"      # window background
FONT_COLOR = "#112d4e"    # main text colour
ACCENT = "#dbe2ef"        # entry-field background accent

# Main window setup
root = tk.Tk()
root.title("Money Tracker")
root.config(bg=BG_COLOR)
root.resizable(0, 0)  # fixed-size window
# NOTE(review): hard-coded absolute icon path — breaks on other machines
root.iconbitmap("C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\money.ico")

# In-memory state: transactions is the list shown in the listbox;
# transactions_history wraps it for JSON persistence
transactions_history = {}
transactions = []
def set_listbox():
    """Redraw the history listbox from the in-memory `transactions` list."""
    global listbox
    listbox.delete(0, tk.END)  # wipe the currently displayed rows
    # one formatted row per transaction: sender, receiver, currency+amount, description
    # NOTE(review): `clicked` (currency selector) is defined later in the file — confirm
    for sender, receiver, amount, description in transactions:
        listbox.insert(tk.END, f"{sender} to {receiver}, {clicked.get()}{amount}, {description}")
def save_json(data):
    """Persist `data` to the history JSON file.

    Args:
        data (dict): the transaction-history mapping to serialize.
    """
    # BUGFIX: previously dumped the module-global `transactions_history`,
    # silently ignoring the `data` argument; now the argument is saved.
    # (All current callers pass transactions_history, so behavior is unchanged.)
    # NOTE(review): hard-coded absolute path — breaks on other machines
    with open("C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\history.json", "w") as file:
        json.dump(data, file, indent=4)
def add_transactions():
    """Validate the form fields and append a new transaction to the list,
    then persist to JSON and refresh the listbox."""
    try:
        # validation only — the parsed int itself is never used
        check_int = int(amount_input.get())
    except ValueError:
        messagebox.showwarning(title=" Error ", message="Please enter only numbers in amount field")
        return
    # NOTE(review): check_fields()/clear_fields() are defined elsewhere in the file — confirm
    if check_fields():
        # a transaction is [sender, receiver, amount, description]
        transactions.append([sender_input.get(), reciever_input.get(), amount_input.get(), desc_input.get("1.0", tk.END)])
        transactions_history["Transactions"] = transactions
        clear_fields()
        save_json(transactions_history)
        set_listbox()
    else:
        messagebox.showwarning(title=" Error ", message="Please do not leave any fields empty")
def delete_transaction():
    """Delete the currently selected transaction, re-save, and refresh the listbox."""
    try:
        # curselection() is empty when nothing is selected -> IndexError
        del transactions[listbox.curselection()[0]]
    except IndexError:
        messagebox.showwarning(title=" Error ", message="Please select any item")
    else:
        transactions_history["Transactions"] = transactions
        save_json(transactions_history)
        set_listbox()
def load_transactions():
    """Copy the selected transaction's fields back into the input widgets
    so it can be edited (see update_transactions)."""
    try:
        selected_idx = listbox.curselection()[0]
        selected_item = transactions[selected_idx]
    except IndexError:
        # nothing selected in the listbox
        messagebox.showwarning(title=" Error ", message="Please select any item")
    else:
        # item layout: [sender, receiver, amount, description]
        sender_var.set(selected_item[0])
        reciever_var.set(selected_item[1])
        amount_var.set(selected_item[2])
        desc_input.delete("1.0", tk.END)
        desc_input.insert(tk.END, selected_item[3])
def update_transactions():
    """Overwrite the selected transaction with the values in the inputs.

    Mirrors add_transactions(): the amount must be a whole number and no
    field may be empty; otherwise a warning is shown and nothing changes.
    A warning is also shown when no listbox row is selected.
    """
    # Fix: add_transactions() rejects non-numeric amounts but the original
    # update path did not, so edits could silently store invalid amounts.
    try:
        int(amount_var.get())
    except ValueError:
        messagebox.showwarning(title=" Error ", message="Please enter only numbers in amount field")
        return
    if check_fields():
        try:
            transactions[listbox.curselection()[0]] = [sender_var.get(), reciever_var.get(), amount_var.get(), desc_input.get("1.0", tk.END)]
        except IndexError:
            messagebox.showwarning(title=" Error ", message="Please select any item")
        else:
            transactions_history["Transactions"] = transactions
            save_json(transactions_history)
            set_listbox()
    else:
        messagebox.showwarning(title=" Error ", message="Please do not leave any fields empty")
# Title
title = tk.Label(root, text="Money Tracker", font=(FONT_NAME, 15, "bold"), bg=BG_COLOR, highlightthickness=0, fg=FONT_COLOR)
title.grid(row=0, column=0, columnspan=2, pady=3)
# ---------------------------- ENTRIES AND LABELS ------------------------------- #
# Left column: one labelled input widget per transaction field.
input_frame = tk.Frame(root, bg=BG_COLOR, highlightthickness=0)
input_frame.grid(row=1, column=0, sticky="N", padx=5)
# Sender
sender_label = tk.Label(input_frame, text="Sender: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0)
sender_label.grid(row=0, column=0, sticky="W", pady=5)
sender_var = tk.StringVar()
sender_input = tk.Entry(input_frame, textvariable=sender_var, width=36, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
sender_input.focus()  # cursor starts in the sender field
sender_input.grid(row=0, column=1, sticky="W", pady=5, padx=10, columnspan=2)
# Reciever
reciever_label = tk.Label(input_frame, text="Reciever: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0)
reciever_label.grid(row=1, column=0, sticky="W", pady=5)
reciever_var = tk.StringVar()
reciever_input = tk.Entry(input_frame, textvariable=reciever_var, width=36, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
reciever_input.grid(row=1, column=1, sticky="W", pady=5, padx=10, columnspan=2)
# Amount
amount_label = tk.Label(input_frame, text="Amount: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0)
amount_label.grid(row=2, column=0, sticky="W", pady=5)
amount_var = tk.StringVar()
# Narrower than the other entries to leave room for the currency dropdown.
amount_input = tk.Entry(input_frame, textvariable=amount_var, width=27, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
amount_input.grid(row=2, column=1, sticky="W", pady=5, padx=10)
# Description
desc_label = tk.Label(input_frame, text="Description: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0, bd=0)
desc_label.grid(row=3, column=0, sticky="N", pady=5)
desc_input = tk.Text(input_frame, width=36, height=12, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
desc_input.grid(row=3, column=1, sticky="W", pady=5, padx=10, columnspan=2)
# NOTE(review): the four empty strings below were almost certainly non-ASCII
# currency symbols (e.g. EUR/GBP/YEN/INR) lost to an encoding problem --
# confirm against the original source before relying on the dropdown.
currencies = [
    "$",
    "",
    "",
    "",
    ""
]
clicked = tk.StringVar()
clicked.set("$")  # default currency shown in the dropdown and the listbox
currency = tk.OptionMenu(input_frame, clicked, *currencies)
currency.config(bg=ACCENT, fg=FONT_COLOR, bd=0, highlightthickness=0, font=(FONT_NAME, 10, "normal"))
currency["menu"].config(bg=ACCENT, fg=FONT_COLOR, bd=0, font=(FONT_NAME, 10, "normal"))
currency.grid(row=2, column=2)
# ---------------------------- BUTTONS ------------------------------- #
btn_frame = tk.Frame(root, bg=BG_COLOR, highlightthickness=0)
btn_frame.grid(row=2, column=0, padx=5, pady=5, sticky="N")
# Add
add_btn= tk.Button(btn_frame, text=" Add ", command=add_transactions, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
add_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Update
update_btn = tk.Button(btn_frame, text=" Update ", command=update_transactions, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
update_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Delete
del_btn = tk.Button(btn_frame, text=" Delete ", command=delete_transaction, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
del_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Load
load_btn = tk.Button(btn_frame, text=" Load ", command=load_transactions, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
load_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Refresh
refresh_btn = tk.Button(btn_frame, text=" Refresh ", command=set_listbox, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
refresh_btn.pack(side=tk.LEFT, padx=5, pady=5)
# ---------------------------- LISTBOX ------------------------------- #
# Right column: scrollable listbox mirroring the `transactions` list.
data_frame = tk.Frame(root, bg=ACCENT, highlightthickness=0)
data_frame.grid(row=1, column=1, rowspan=2)
# Scroll Bars
scroll_bar_y = tk.Scrollbar(data_frame, orient=tk.VERTICAL)
scroll_bar_x = tk.Scrollbar(data_frame, orient=tk.HORIZONTAL)
# Listbox
listbox = tk.Listbox(data_frame, height=18, width=50, yscrollcommand=scroll_bar_y.set, xscrollcommand=scroll_bar_x.set, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
# Scroll Bars
scroll_bar_y.config(command=listbox.yview)
scroll_bar_y.pack(side=tk.RIGHT, fill=tk.Y)
scroll_bar_x.config(command=listbox.xview)
scroll_bar_x.pack(side=tk.BOTTOM, fill=tk.X)
listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
# ---------------------------- STATUS BAR ------------------------------- #
status_frame = tk.LabelFrame(root, bd=0, relief=tk.SUNKEN, bg="#3f72af", highlightthickness=0)
status_frame.grid(sticky=tk.N+tk.S+tk.E+tk.W, columnspan=2)
# Made By
made_by = tk.Label(status_frame, text="Made By Arnav Ghatti", anchor=tk.E, font=(FONT_NAME, 9, "normal"), bg="#3f72af", highlightthickness=0, fg=BG_COLOR)
made_by.pack(side=tk.RIGHT, fill=tk.BOTH, expand=1)
# Version
version_label = tk.Label(status_frame, text="Version: 2.5.3", anchor=tk.W, font=(FONT_NAME, 9, "normal"), bg="#3f72af", highlightthickness=0, fg=BG_COLOR)
version_label.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
def load_data():
    """Populate `transactions` (and the listbox) from history.json.

    Fixes two issues in the original:
    * a missing or corrupt history.json crashed the app on first run --
      now it is treated as "no transactions yet";
    * the listbox rows were rendered here with a hard-coded "$" prefix,
      diverging from set_listbox() which honours the currency dropdown --
      rendering is now delegated to set_listbox().
    """
    global transactions, transactions_history
    try:
        with open("C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\history.json", "r") as file:
            transactions_history = json.load(file)
        transactions = transactions_history.get("Transactions", [])
    except (FileNotFoundError, json.JSONDecodeError):
        # First run (or unreadable file): start with an empty ledger.
        transactions_history = {}
        transactions = []
    set_listbox()
# Load any previously saved transactions, then hand control to Tk's event loop.
load_data()
root.mainloop()
| 40.235043 | 205 | 0.683696 |
eeca641ef832fde419fc26a2088df6a05f63fc33 | 519 | py | Python | ftmscan/utils/parsing.py | awilliamson10/ftmscan-python | d7ed384f1ac65461c86bed4a65f9332baf92c8f0 | [
"MIT"
] | 4 | 2022-01-10T21:58:02.000Z | 2022-03-27T20:21:35.000Z | polygonscan/utils/parsing.py | yusufseyrek/polygonscan-python | c58a8190e41a5c9bac0a5e88db809e5e207b1c77 | [
"MIT"
] | 3 | 2021-09-25T05:10:27.000Z | 2021-11-21T04:56:29.000Z | polygonscan/utils/parsing.py | yusufseyrek/polygonscan-python | c58a8190e41a5c9bac0a5e88db809e5e207b1c77 | [
"MIT"
] | 4 | 2021-09-25T05:11:08.000Z | 2022-03-09T01:01:33.000Z | import requests
| 30.529412 | 51 | 0.554913 |
eeca73f0a33396739525615f94801665b147bf27 | 12,725 | py | Python | empire_cellular_automaton/dataset_processing.py | ThomasMiller01/ProofOfConcept | 021bf29743309224628682d0f82b0be80ae83c95 | [
"MIT"
] | 1 | 2019-12-18T13:49:22.000Z | 2019-12-18T13:49:22.000Z | empire_cellular_automaton/dataset_processing.py | ThomasMiller01/Experiments | 021bf29743309224628682d0f82b0be80ae83c95 | [
"MIT"
] | null | null | null | empire_cellular_automaton/dataset_processing.py | ThomasMiller01/Experiments | 021bf29743309224628682d0f82b0be80ae83c95 | [
"MIT"
] | 1 | 2021-08-29T09:22:52.000Z | 2021-08-29T09:22:52.000Z | import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import time
if __name__ == "__main__":
for directory in os.listdir('./datasets'):
if "example" not in directory:
save_figs(directory)
print("creating statistics done")
| 37.985075 | 122 | 0.558428 |
eecf75568a4959cab7877ed219454c84c98b7e64 | 403 | py | Python | mindhome_alpha/erpnext/patches/v11_0/add_expense_claim_default_account.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v11_0/add_expense_claim_default_account.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v11_0/add_expense_claim_default_account.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import frappe | 36.636364 | 121 | 0.791563 |
eed48753201aaf2076987680b987b0334df7af1f | 4,653 | py | Python | cliff/lister.py | tivaliy/cliff | a04a48f4f7dc72b1bcc95a5c6a550c7650e35ab3 | [
"Apache-2.0"
] | 187 | 2015-01-13T04:07:41.000Z | 2022-03-10T14:12:27.000Z | cliff/lister.py | tivaliy/cliff | a04a48f4f7dc72b1bcc95a5c6a550c7650e35ab3 | [
"Apache-2.0"
] | 3 | 2016-01-05T20:52:55.000Z | 2020-10-01T06:16:58.000Z | cliff/lister.py | tivaliy/cliff | a04a48f4f7dc72b1bcc95a5c6a550c7650e35ab3 | [
"Apache-2.0"
] | 69 | 2015-02-01T01:28:37.000Z | 2021-11-15T08:28:53.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Application base class for providing a list of data as output."""
import abc
import logging
from . import display
| 36.637795 | 78 | 0.585214 |
eed5699e06d3cac61b4a945b53a1004046c608f3 | 1,026 | py | Python | task3/task3.py | ksmirenko/ml-homework | a5e558352ffc332ad5e40526dda21f205718a203 | [
"MIT"
] | 1 | 2020-08-05T08:06:33.000Z | 2020-08-05T08:06:33.000Z | task3/task3.py | ksmirenko/ml-homework | a5e558352ffc332ad5e40526dda21f205718a203 | [
"MIT"
] | null | null | null | task3/task3.py | ksmirenko/ml-homework | a5e558352ffc332ad5e40526dda21f205718a203 | [
"MIT"
] | null | null | null | from PIL import Image
import numpy as np
# Works when launched from terminal
# noinspection PyUnresolvedReferences
from k_means import k_means
input_image_file = 'lena.jpg'
output_image_prefix = 'out_lena'
n_clusters = [2, 3, 5]
max_iterations = 100
launch_count = 3
main()
| 27.72973 | 104 | 0.692008 |
eed63ef06321c79002e85fdaeb08205c4299ea39 | 3,389 | py | Python | dcrnn_train.py | syin3/cs224w-traffic | 284836b49404bfd38ae23b31f89f8e617548e286 | [
"MIT"
] | 9 | 2019-03-20T01:02:07.000Z | 2020-11-25T06:45:30.000Z | dcrnn_train.py | syin3/cs224w-traffic | 284836b49404bfd38ae23b31f89f8e617548e286 | [
"MIT"
] | null | null | null | dcrnn_train.py | syin3/cs224w-traffic | 284836b49404bfd38ae23b31f89f8e617548e286 | [
"MIT"
] | 2 | 2020-09-24T07:03:58.000Z | 2020-11-09T04:43:03.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import yaml
from model.dcrnn_supervisor import DCRNNSupervisor
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_filename', required=True, default=None, type=str, help='Configuration filename for restoring the model.')
parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set true to only use cpu.')
# adjacent and distance-weighted
parser.add_argument('--weightType', required=True, choices=['a', 'd'], help='w/ or w/o distance pre-processing')
parser.add_argument('--att', dest='attention', action='store_true', help='Call this command to raise attention mechanism in the training.')
parser.add_argument('--no-att', dest='attention', action='store_false', help='Call this command not to raise attention mechanism in the training.')
parser.set_defaults(attention=False)
subparsers = parser.add_subparsers()
fullyConnectParser = subparsers.add_parser('fc', help='In fully connect mode, choose embed file')
fullyConnectParser.add_argument('--gEmbedFile', required=True, default='LA-n2v-14-0.1-1', help='Embedding file for n2v, should add up-directory when calling')
fullyConnectParser.add_argument('--network', nargs='?', const='fc', default='fc', help='To store the choice of fully connected')
graphConvParser = subparsers.add_parser('graphConv', help='In graph conv mode, choose W matrix form')
graphConvParser.add_argument('--hop', required=True, type=int, default=2,
help='k-hop neighbors, default is 2 for distance-processed matrix; but must be one for binary matrix')
graphConvParser.add_argument('--network', nargs='?', const='gconv', default='gconv', help='To store the choice of gconv')
args = parser.parse_args()
with open(args.config_filename) as f:
doc = yaml.load(f)
# default batch sizes to 64, in training, validation and in testing
doc['data']['batch_size'] = 64
doc['data']['test_batch_size'] = 64
doc['data']['val_batch_size'] = 64
# set matrix to adjacency or distance-weighted
if args.weightType == 'd':
doc['data']['graph_pkl_filename'] = "data/sensor_graph/adj_mx_la.pkl"
else:
doc['data']['graph_pkl_filename'] = "data/sensor_graph/adj_bin_la.pkl"
# record necessary info to log
doc['model']['weightMatrix'] = args.weightType
doc['model']['attention'] = args.attention
doc['model']['network'] = args.network
if 'gEmbedFile' in vars(args):
doc['model']['graphEmbedFile'] = args.gEmbedFile
doc['model']['max_diffusion_step'] = 0
if 'hop' in vars(args):
doc['model']['max_diffusion_step'] = args.hop
# save the info
with open(args.config_filename, 'w') as f:
yaml.dump(doc, f)
main(args)
| 42.3625 | 162 | 0.689584 |
eed698cee32da7af7d7cb366130b591986c4feae | 1,035 | py | Python | train.py | k2sebeom/DeepLOLCourt | 630f1eee1729c06f686abc7c2a7ecbdfe66803b3 | [
"MIT"
] | null | null | null | train.py | k2sebeom/DeepLOLCourt | 630f1eee1729c06f686abc7c2a7ecbdfe66803b3 | [
"MIT"
] | null | null | null | train.py | k2sebeom/DeepLOLCourt | 630f1eee1729c06f686abc7c2a7ecbdfe66803b3 | [
"MIT"
] | null | null | null | import torch.optim as optim
from torch import nn
from data.match_dataset import MatchDataset
from torch.utils.data import DataLoader
from models.lol_result_model import LOLResultModel
import torch
if __name__ == '__main__':
    # Training hyper-parameters.
    EPOCH = 50
    BATCH_SIZE = 32
    loader = DataLoader(MatchDataset('dataset/train_data.csv'), BATCH_SIZE)
    print("Dataset Loaded")
    # Binary cross-entropy: the model predicts a win/lose probability.
    loss_criterion = nn.BCELoss()
    device = torch.device('cuda:0')  # assumes a CUDA GPU is available -- no CPU fallback
    model = LOLResultModel(190)  # 190 = input feature dimension
    print("Model created")
    optimizer = optim.Adam(model.parameters(), lr=0.0001)
    model.to(device)
    for epoch in range(EPOCH):
        loss_data = 0
        for i, data in enumerate(loader):
            output = model(data['x'].to(device))
            # data['y'] is reshaped to (batch, 1) to match the model output.
            loss = loss_criterion(output, data['y'].unsqueeze(1).float().to(device))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_data = loss.data
        # NOTE(review): prints the *last batch's* loss, not an epoch average --
        # presumably intentional as a cheap progress indicator; confirm.
        print(f'Epoch {epoch}: {loss_data}')
    torch.save(model.state_dict(), 'checkpoints/model.pth')
| 30.441176 | 84 | 0.656039 |
eed71f6a7395828dd1b7ba56051666be99d7beff | 774 | py | Python | src/cpfromddd.py | theonewolf/TripleD | 875c903a302d5502ac65224c16fa65da1246483e | [
"MIT"
] | 13 | 2015-04-04T14:41:38.000Z | 2021-12-28T12:24:29.000Z | src/cpfromddd.py | theonewolf/TripleD | 875c903a302d5502ac65224c16fa65da1246483e | [
"MIT"
] | null | null | null | src/cpfromddd.py | theonewolf/TripleD | 875c903a302d5502ac65224c16fa65da1246483e | [
"MIT"
] | 8 | 2015-01-26T17:15:27.000Z | 2019-09-14T03:22:46.000Z | #!/usr/bin/env python
import libtripled, logging, sys, os
# CONSTANTS
log = logging.getLogger('tripled.cpfromddd')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) < 4:
print '%s <master> <tripled src> <local dst>' % (sys.argv[0])
exit(-1)
tripled = libtripled.tripled(sys.argv[1])
try: os.makedirs(os.path.dirname(sys.argv[3]))
except OSError: pass
with open(sys.argv[3], 'w') as f:
for chunk in next_chunk(tripled, sys.argv[2]):
f.write(chunk)
| 28.666667 | 75 | 0.630491 |
eed75ce868931dabebd40ef5cd1f3bab8cc08cc7 | 10,094 | py | Python | torchrec/distributed/test_utils/test_sharding.py | samiwilf/torchrec | 50ff0973d5d01ec80fe36ba5f1d524c92c799836 | [
"BSD-3-Clause"
] | 1 | 2022-03-07T09:06:11.000Z | 2022-03-07T09:06:11.000Z | torchrec/distributed/test_utils/test_sharding.py | samiwilf/torchrec | 50ff0973d5d01ec80fe36ba5f1d524c92c799836 | [
"BSD-3-Clause"
] | null | null | null | torchrec/distributed/test_utils/test_sharding.py | samiwilf/torchrec | 50ff0973d5d01ec80fe36ba5f1d524c92c799836 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import cast, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.nn as nn
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torchrec.distributed.embedding_types import EmbeddingTableConfig
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.planner import (
EmbeddingShardingPlanner,
ParameterConstraints,
Topology,
)
from torchrec.distributed.test_utils.multi_process import MultiProcessContext
from torchrec.distributed.test_utils.test_model import (
ModelInput,
TestEBCSharder,
TestEBSharder,
TestETCSharder,
TestETSharder,
TestSparseNNBase,
)
from torchrec.distributed.types import (
ModuleSharder,
ShardedTensor,
ShardingEnv,
ShardingPlan,
ShardingType,
)
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
| 36.705455 | 86 | 0.645928 |
eed876b1554e0a4c99de5f131d255d84ecaa3345 | 78 | py | Python | lyrebird/plugins/__init__.py | dodosophia/lyrebird | b3c3d6e0f0f47b8df0cc119a1e5d30763371fa3d | [
"MIT"
] | 1 | 2020-03-18T05:56:53.000Z | 2020-03-18T05:56:53.000Z | lyrebird/plugins/__init__.py | robert0825/lyrebird | 18bcbd2030bd4a506d1f519ae0316d8fc667db4f | [
"MIT"
] | null | null | null | lyrebird/plugins/__init__.py | robert0825/lyrebird | 18bcbd2030bd4a506d1f519ae0316d8fc667db4f | [
"MIT"
] | 1 | 2019-03-11T09:25:36.000Z | 2019-03-11T09:25:36.000Z | from .plugin_loader import manifest
from .plugin_manager import PluginManager
| 26 | 41 | 0.871795 |
eed9c6dd573fe2bb3afc30e2202d6ac77f9cb554 | 330 | py | Python | examples/optimizers/science/create_hgso.py | anukaal/opytimizer | 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9 | [
"Apache-2.0"
] | 528 | 2018-10-01T20:00:09.000Z | 2022-03-27T11:15:31.000Z | examples/optimizers/science/create_hgso.py | anukaal/opytimizer | 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9 | [
"Apache-2.0"
] | 17 | 2019-10-30T00:47:03.000Z | 2022-03-21T11:39:28.000Z | examples/optimizers/science/create_hgso.py | anukaal/opytimizer | 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9 | [
"Apache-2.0"
] | 35 | 2018-10-01T20:03:23.000Z | 2022-03-20T03:54:15.000Z | from opytimizer.optimizers.science import HGSO
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
params = {
'n_clusters': 2,
'l1': 0.0005,
'l2': 100,
'l3': 0.001,
'alpha': 1.0,
'beta': 1.0,
'K': 1.0
}
# Creates an HGSO optimizer
o = HGSO(params=params)
| 19.411765 | 51 | 0.633333 |
eedc7a11ee4379d86b302ba06badd9a7738a9e2e | 63 | py | Python | training_tools/architectures/image_generation/__init__.py | kylrth/training_tools | eccb19a28f65a83e40642c9761ccb1dd090a3e5d | [
"MIT"
] | null | null | null | training_tools/architectures/image_generation/__init__.py | kylrth/training_tools | eccb19a28f65a83e40642c9761ccb1dd090a3e5d | [
"MIT"
] | null | null | null | training_tools/architectures/image_generation/__init__.py | kylrth/training_tools | eccb19a28f65a83e40642c9761ccb1dd090a3e5d | [
"MIT"
] | null | null | null | """Image generating architectures.
Kyle Roth. 2019-07-10.
"""
| 12.6 | 34 | 0.698413 |
eeddefbcddacdcd31162977b74fe0703603b2f9f | 2,668 | py | Python | adverse/urls.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
] | null | null | null | adverse/urls.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
] | null | null | null | adverse/urls.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import (EmergencyContactCreateView, EmergencyContactUpdateView, EmergencyContactDeleteView,
EmergencyContactDetailView, EmergencyContactListView, AdverseEventTypeUpdateView,
AdverseEventTypeCreateView, AdverseEventTypeDeleteView, AdverseEventTypeDetailView,
AdverseEventTypeListView, AdverseEventCreateView, AdverseEventDeleteView, AdverseEventDetailView,
AdverseEventListView, AdverseEventUpdateView, AdverseEventExportFormView, AdverseEventExportListView)
urlpatterns = [
url(r'^emergency-contacts/$', EmergencyContactListView.as_view(), name='adverse_emergency_contact_list'),
url(r'^emergency-contacts/create/$', EmergencyContactCreateView.as_view(), name='adverse_emergency_contact_create'),
url(r'^emergency-contacts/(?P<pk>[0-9]+)/$', EmergencyContactDetailView.as_view(),
name='adverse_emergency_contact_detail'),
url(r'^emergency-contacts/(?P<pk>[0-9]+)/update/$', EmergencyContactUpdateView.as_view(),
name='adverse_emergency_contact_update'),
url(r'^emergency-contacts/(?P<pk>[0-9]+)/delete/$', EmergencyContactDeleteView.as_view(),
name='adverse_emergency_contact_delete'),
url(r'^event-types/$', AdverseEventTypeListView.as_view(), name='adverse_event_type_list'),
url(r'^event-types/create/$', AdverseEventTypeCreateView.as_view(), name='adverse_event_type_create'),
url(r'^event-types/(?P<pk>[0-9]+)/$', AdverseEventTypeDetailView.as_view(), name='adverse_event_type_detail'),
url(r'^event-types/(?P<pk>[0-9]+)/update/$', AdverseEventTypeUpdateView.as_view(), name='adverse_event_type_update'),
url(r'^event-types/(?P<pk>[0-9]+)/delete/$', AdverseEventTypeDeleteView.as_view(), name='adverse_event_type_delete'),
url(r'^events/$', AdverseEventListView.as_view(), name='adverse_event_list'),
url(r'^events/create/$', AdverseEventCreateView.as_view(), name='adverse_event_create'),
url(r'^events/(?P<pk>[0-9]+)/$', AdverseEventDetailView.as_view(), name='adverse_event_detail'),
url(r'^events/(?P<pk>[0-9]+)/update/$', AdverseEventUpdateView.as_view(), name='adverse_event_update'),
url(r'^events/(?P<pk>[0-9]+)/delete/$', AdverseEventDeleteView.as_view(), name='adverse_event_delete'),
url(r'^events/export/$', AdverseEventExportFormView.as_view(), name='adverse_event_export_form'),
url(r'^events/export/(?P<start_year>[0-9]{4})-(?P<start_month>[0-9]{2})-(?P<start_day>[0-9]{2})/(?P<end_year>[0-9]{4})-(?P<end_month>[0-9]{2})-(?P<end_day>[0-9]{2})/$',
AdverseEventExportListView.as_view(), name='adverse_event_export_list'),
] | 86.064516 | 172 | 0.725262 |
eedf4a520738f711e0b9af209fc2128b16e46db5 | 1,133 | py | Python | qbflask/models.py | kevindkeogh/qbootstrapper-flask | 490906837d6522e3669193e5097bd33e1f953451 | [
"MIT"
] | 1 | 2017-04-27T08:59:01.000Z | 2017-04-27T08:59:01.000Z | qbflask/models.py | kevindkeogh/qbootstrapper-flask | 490906837d6522e3669193e5097bd33e1f953451 | [
"MIT"
] | null | null | null | qbflask/models.py | kevindkeogh/qbootstrapper-flask | 490906837d6522e3669193e5097bd33e1f953451 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''Handles all database interactions for qbootstrapper
'''
from flask import g
from qbflask import app
import sqlite3
def connect_db():
    '''Open a new SQLite connection with name-addressable rows.

    The database path comes from the Flask app configuration; rows are
    returned as sqlite3.Row so columns can be read by name.
    '''
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection
def get_db():
    '''Return the shared database connection, opening one on first access.

    The connection is cached on Flask's application-context object `g`
    so each request reuses a single handle.
    '''
    if not hasattr(g, 'db'):
        setattr(g, 'db', connect_db())
    return g.db
def init_db():
    '''(Re)create the database schema from the bundled schema.sql script.'''
    connection = get_db()
    with app.open_resource('schema.sql', mode='r') as schema_file:
        script = schema_file.read()
    connection.cursor().executescript(script)
    connection.commit()
| 21.788462 | 79 | 0.655781 |
eee0e160c355877e9ab99acba82ef48b402d10db | 2,795 | py | Python | termlog/interpret.py | brianbruggeman/termlog | 361883f790ab6fae158095585370672e3ca8e354 | [
"MIT"
] | 1 | 2019-11-22T09:32:25.000Z | 2019-11-22T09:32:25.000Z | termlog/interpret.py | brianbruggeman/termlog | 361883f790ab6fae158095585370672e3ca8e354 | [
"MIT"
] | null | null | null | termlog/interpret.py | brianbruggeman/termlog | 361883f790ab6fae158095585370672e3ca8e354 | [
"MIT"
] | null | null | null | """Interprets each AST node"""
import ast
import textwrap
from typing import Any, Dict, List
def extract_fields(code: str) -> Dict[str, Any]:
    """Extract the variable names referenced in a block of code.

    Performs a breadth-first walk of the AST, descending through the node
    kinds this module cares about, and records every ``ast.Name`` found in
    value position.

    Args:
        code: the code block to parse

    Returns:
        Mapping of each discovered variable name to ``None``.
    """
    # Parsing expects that the code have no indentation
    code = textwrap.dedent(code)
    parsed = ast.parse(code)
    queue: List[Any] = parsed.body
    fields: Dict[str, Any] = {}
    # Node kinds deliberately skipped outright.
    ignored = tuple([ast.ImportFrom, ast.Import, ast.Assert, ast.Raise])
    # Node kinds recognised but not (yet) descended into. String constants
    # also land here: the old code matched them via ast.Str, an alias that
    # has been deprecated since Python 3.8 (its `.s` attribute likewise) and
    # only appended to a dead local list, so dropping that branch keeps the
    # returned fields identical while working on current interpreters.
    unhandled = tuple(
        [
            ast.Constant,
            ast.Dict,
            ast.DictComp,
            ast.Expr,
            ast.GeneratorExp,
            ast.For,
            ast.List,
            ast.ListComp,
            ast.Return,
            ast.Subscript,
            ast.Try,
            ast.With,
        ]
    )
    count = -1
    while queue:
        count += 1
        node = queue.pop(0)
        if isinstance(node, (list, tuple)):
            queue.extend(node)
        elif isinstance(node, (ast.Expr, ast.FormattedValue, ast.Assign, ast.Starred, ast.Attribute, ast.Subscript, ast.AnnAssign)):
            # Single-value wrappers: descend into the wrapped expression.
            queue.append(node.value)
        elif isinstance(node, (ast.Call,)):
            # Only positional arguments are walked (not the callee name).
            queue.extend(node.args)
        elif isinstance(node, (ast.JoinedStr, ast.BoolOp)):
            queue.extend(node.values)
        elif isinstance(node, (ast.Name,)):
            fields.update({node.id: None})
        elif isinstance(node, (ast.BinOp,)):
            queue.append(node.left)
            queue.append(node.right)
        elif isinstance(node, (ast.FunctionDef,)):
            queue.extend(node.body)
        elif isinstance(node, (ast.If, ast.IfExp)):
            queue.append(node.body)
            queue.append(node.orelse)
        elif isinstance(node, ignored):
            pass
        elif isinstance(node, unhandled):
            pass
        else:
            print("Termlog Warning [Unhandled ast.Node]:", node, ", ".join([d for d in dir(node) if not d.startswith("_")]))
        if count > 4096:  # to prevent a runaway queue
            break
    return fields
| 34.9375 | 132 | 0.544544 |
eee2473186eac206c8388e1f0a6f771a7776dd49 | 4,757 | py | Python | python3/koans/about_strings.py | PatrickBoynton/python_koans | 12243005b6ca5145a3989eadc42d1cca122fe9a6 | [
"MIT"
] | null | null | null | python3/koans/about_strings.py | PatrickBoynton/python_koans | 12243005b6ca5145a3989eadc42d1cca122fe9a6 | [
"MIT"
] | null | null | null | python3/koans/about_strings.py | PatrickBoynton/python_koans | 12243005b6ca5145a3989eadc42d1cca122fe9a6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
| 36.875969 | 81 | 0.644103 |
eee41fee815cbfd9d791866ac61cc5f679e6a33c | 630 | py | Python | acmicpc/2798/2798.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/2798/2798.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/2798/2798.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | params = [int(x) for x in input().split()]
point = params[-1]
card_numbers = sorted([int(i) for i in input().split()])
max_sum = 0
for i in range(len(card_numbers)):
for j in range(i+1, len(card_numbers)):
for k in range(j+1, len(card_numbers)):
if card_numbers[i] + card_numbers[j] + card_numbers[k] > point:
break
if card_numbers[i] + card_numbers[j] + card_numbers[k] <= point \
and point - (card_numbers[i] + card_numbers[j] + card_numbers[k]) < point - max_sum:
max_sum = card_numbers[i] + card_numbers[j] + card_numbers[k]
print(max_sum)
| 39.375 | 96 | 0.603175 |
eee600143ae9d2506a33cc7fd8cd95666e09cf2a | 453 | py | Python | 2/2.py | pyl/AdventOfCode | 575a8ba2eb6bd597201986444a799a4384ac3983 | [
"MIT"
] | null | null | null | 2/2.py | pyl/AdventOfCode | 575a8ba2eb6bd597201986444a799a4384ac3983 | [
"MIT"
] | null | null | null | 2/2.py | pyl/AdventOfCode | 575a8ba2eb6bd597201986444a799a4384ac3983 | [
"MIT"
] | null | null | null | import os
import re
# from .m.red import readInput
data = open("2\\input.txt").read().split('\n')
parsedData = []
for x in data:
parsedData.append(list(filter(None, re.split("[- :]", x))))
parsedData.pop()
count = 0
for x in parsedData:
print(x)
if(x[3][int(x[0])-1] != x[3][int(x[1])-1]
and (x[3][int(x[1])-1] == x[2]
or x[3][int(x[0])-1] == x[2])):
print("found" + ' '.join(x))
count += 1
print(count)
| 15.62069 | 63 | 0.527594 |
eee70444919e0996101bd470d17bbcdf1da08d3b | 284 | py | Python | python/multi-2.6/simple.py | trammell/test | ccac5e1dac947032e64d813e53cb961417a58d05 | [
"Artistic-2.0"
] | null | null | null | python/multi-2.6/simple.py | trammell/test | ccac5e1dac947032e64d813e53cb961417a58d05 | [
"Artistic-2.0"
] | null | null | null | python/multi-2.6/simple.py | trammell/test | ccac5e1dac947032e64d813e53cb961417a58d05 | [
"Artistic-2.0"
] | null | null | null | #!/usr/bin/env python2.4
"""
"""
obj = MyClass(6, 7)
| 18.933333 | 66 | 0.542254 |
eee72143266c2f7d061e2031c509c2b48483a480 | 1,183 | py | Python | dd3/visitor/views.py | za/dd3 | b70d795fb3bd3ff805696b632beabf6d1f342389 | [
"Apache-2.0"
] | null | null | null | dd3/visitor/views.py | za/dd3 | b70d795fb3bd3ff805696b632beabf6d1f342389 | [
"Apache-2.0"
] | null | null | null | dd3/visitor/views.py | za/dd3 | b70d795fb3bd3ff805696b632beabf6d1f342389 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.http import JsonResponse
from django.db import connections
from django.db.models import Count
from django.contrib import admin
from visitor.models import Apache
import json
admin.site.register(Apache)
# Create your views here.
| 26.886364 | 55 | 0.718512 |
eee85fe54b0a7025f321a3dcd3adecc8d263a047 | 2,451 | py | Python | 02_test_and_prototype/CBH file subset tests.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | [
"MIT"
] | null | null | null | 02_test_and_prototype/CBH file subset tests.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | [
"MIT"
] | null | null | null | 02_test_and_prototype/CBH file subset tests.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %%
import pandas as pd
# %%
# %%
workdir = "/Users/pnorton/USGS/Projects/National_Hydrology_Model/regions/r10U/input/cbh"
filename = '%s/daymet_1980_2010_tmin.cbh' % workdir
missing = [-99.0, -999.0]
infile = open(filename, 'r')
fheader = ''
for ii in range(0,3):
line = infile.readline()
if line[0:4] in ['prcp', 'tmax', 'tmin']:
# Change the number of HRUs included to one
numhru = int(line[5:])
fheader += line[0:5] + ' 1\n'
else:
fheader += line
print fheader
print 'numhru:', numhru
# %%
# Read in the CBH data for the HRU we want to extract
hruindex = 1 # one-based hru index
df1 = pd.read_csv(infile, sep=' ', skipinitialspace=True,
#usecols=[0, 1, 2, 3, 4, 5, hruindex+5],
header=None)
# df1 = pd.read_csv(infile, sep=r"\s*", engine='python',
# skiprows=3, usecols=[0, 1, 2, 3, 4, 5, hruindex+6],
# header=None)
infile.close()
df1.head(10)
# %%
df1.loc[:,[0,1,2,8]]
# %%
# Write the subsetted CBH data out
outfile = open('crap.cbh', 'w')
outfile.write(fheader)
df1.to_csv(outfile, sep=' ', float_format='%0.4f', header=False, index=False)
outfile.close()
# %%
# %%
workdir = "/Users/pnorton/Projects/National_Hydrology_Model/tmp"
filename = '%s/daymet_1980_2011_prcp.cbh' % workdir
missing = [-99.0, -999.0]
infile = open(filename, 'r')
fheader = ''
for ii in range(0,3):
line = infile.readline()
if line[0:6] in ['precip', 'tmax', 'tmin']:
# Change the number of HRUs included to one
numhru = int(line[7:])
fheader += line[0:5] + ' 1\n'
else:
fheader += line
print fheader
print 'numhru:', numhru
# %%
df1 = pd.read_csv(infile, sep=' ', skipinitialspace=True,
#usecols=[0, 1, 2, 3, 4, 5, hruindex+5],
header=None)
# df1 = pd.read_csv(infile, sep=r"\s*", engine='python',
# skiprows=3, usecols=[0, 1, 2, 3, 4, 5, hruindex+6],
# header=None)
infile.close()
df1.head(10)
# %%
# Check for precip values less than 0.001
df2 = df1[df1.iloc[:,6:] < 0.001]
df2.sum().sum()
# %%
# %%
| 22.694444 | 88 | 0.575275 |
eee96c8768d0bd73bfcc0b80259c717f22d6398d | 655 | py | Python | tests/test_time_compare.py | ludwiglierhammer/pyhomogenize | 339cd823b0e8ce618f1b2e42a69c20fb92ca7485 | [
"MIT"
] | null | null | null | tests/test_time_compare.py | ludwiglierhammer/pyhomogenize | 339cd823b0e8ce618f1b2e42a69c20fb92ca7485 | [
"MIT"
] | null | null | null | tests/test_time_compare.py | ludwiglierhammer/pyhomogenize | 339cd823b0e8ce618f1b2e42a69c20fb92ca7485 | [
"MIT"
] | null | null | null |
import pytest
import pyhomogenize as pyh
from . import has_dask, requires_dask
from . import has_xarray, requires_xarray
from . import has_numpy, requires_numpy
| 34.473684 | 107 | 0.79084 |
eee9f9f542f197693a4587a809d1d13007ab6153 | 8,391 | py | Python | features/steps/zz_08_materials_steps.py | tewarfel/RayTracerChallenge_1 | 736cc5d159c267c9bcc14d42abb03eedc2f7e5f1 | [
"MIT"
] | 2 | 2020-05-13T20:54:50.000Z | 2021-06-06T03:37:41.000Z | features/steps/zz_08_materials_steps.py | tewarfel/RayTracerChallenge_1 | 736cc5d159c267c9bcc14d42abb03eedc2f7e5f1 | [
"MIT"
] | null | null | null | features/steps/zz_08_materials_steps.py | tewarfel/RayTracerChallenge_1 | 736cc5d159c267c9bcc14d42abb03eedc2f7e5f1 | [
"MIT"
] | null | null | null | from behave import *
from hamcrest import assert_that, equal_to
from vec3 import Vec3, vec3
from vec4 import Vec4, point, vector
from base import equal, normalize, transform, ray, lighting
import numpy as np
from shape import material, sphere, test_shape, normal_at, set_transform, intersect, glass_sphere, point_light
from base import render, translation, scaling, view_transform, world, camera, color, rotation_y, rotation_z, rotation_x, stripe_at, stripe_pattern
from parse_type import TypeBuilder
from step_helper import *
valid_test_objects = ["light","m", "in_shadow"]
parse_test_object = TypeBuilder.make_choice(valid_test_objects)
register_type(TestObject=parse_test_object)
valid_test_variables = ["intensity", "position", "eyev", "normalv", "result", "c1", "c2"]
parse_test_variable = TypeBuilder.make_choice(valid_test_variables)
register_type(TestVariable=parse_test_variable)
valid_light_elements = ["position", "intensity"]
parse_light_element = TypeBuilder.make_choice(valid_light_elements)
register_type(LightElement=parse_light_element)
valid_material_elements = ["color", "ambient", "diffuse", "specular", "shininess", "reflective", "transparency", "refractive_index", "pattern"]
parse_material_element = TypeBuilder.make_choice(valid_material_elements)
register_type(MaterialElement=parse_material_element)
valid_boolean_values = ["true", "false"]
parse_boolean_value = TypeBuilder.make_choice(valid_boolean_values)
register_type(BooleanValue=parse_boolean_value)
| 45.603261 | 196 | 0.725539 |
eeea795546b0f95cf627707162e00a3f25d014a4 | 2,073 | py | Python | wordfrequencies/wordfrequencies.py | chrisshiels/life | f6902ef656e0171c07eec3eb9343a275048ab849 | [
"MIT"
] | null | null | null | wordfrequencies/wordfrequencies.py | chrisshiels/life | f6902ef656e0171c07eec3eb9343a275048ab849 | [
"MIT"
] | null | null | null | wordfrequencies/wordfrequencies.py | chrisshiels/life | f6902ef656e0171c07eec3eb9343a275048ab849 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# 'wordfrequencies.py'.
# Chris Shiels.
import re
import sys
if __name__ == "__main__":
sys.exit(main(sys.stdin, sys.stdout, sys.stderr, sys.argv[1:]))
| 19.556604 | 78 | 0.545586 |
eeea7ce35f96919784a10c51746fa125d0fb04fb | 741 | py | Python | data/thread_generator.py | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a | [
"MIT"
] | null | null | null | data/thread_generator.py | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a | [
"MIT"
] | null | null | null | data/thread_generator.py | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a | [
"MIT"
] | null | null | null | import threading
import time
import numpy as np
from collections import deque
| 29.64 | 55 | 0.62753 |
eeecbdae984ff942e14cb18d12ef5612889a5ac7 | 81 | py | Python | pbs/apps.py | AliTATLICI/django-rest-app | 901e1d50fe4c8732dccdb597d6cad6e099a2dbfa | [
"MIT"
] | null | null | null | pbs/apps.py | AliTATLICI/django-rest-app | 901e1d50fe4c8732dccdb597d6cad6e099a2dbfa | [
"MIT"
] | null | null | null | pbs/apps.py | AliTATLICI/django-rest-app | 901e1d50fe4c8732dccdb597d6cad6e099a2dbfa | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 13.5 | 33 | 0.728395 |
eeee2179bf362d1c71b12b2f474e4d3a6d80e573 | 1,544 | py | Python | spug/data_pipeline/sources/stock.py | syeehyn/spug | 216976e0171bbc14042377fbbb535180bd2efaf3 | [
"Apache-2.0"
] | null | null | null | spug/data_pipeline/sources/stock.py | syeehyn/spug | 216976e0171bbc14042377fbbb535180bd2efaf3 | [
"Apache-2.0"
] | null | null | null | spug/data_pipeline/sources/stock.py | syeehyn/spug | 216976e0171bbc14042377fbbb535180bd2efaf3 | [
"Apache-2.0"
] | 1 | 2021-12-05T22:54:28.000Z | 2021-12-05T22:54:28.000Z | """
fetch historical stocks prices
"""
from tqdm import tqdm
import pandas as pd
import pandas_datareader as pdr
from .base import DataFetcher
def get_stock_price(symbol, start, end):
"""get stock price of a company over a time range
Args:
symbol (str): ticker symbol of a stock
start (datetime.datetime): start time
end (datetime.datetime): end time
Returns:
pd.DataFrame: stock price of a company over a time range
"""
df = (
pdr.yahoo.daily.YahooDailyReader(symbol, start=start, end=end)
.read()
.reset_index()[["Date", "High", "Low", "Open", "Close", "Volume", "Adj Close"]]
)
df["date"] = pd.to_datetime(df.Date)
return df.drop("Date", axis=1)
| 31.510204 | 87 | 0.613342 |
eeee6f4fc03992c011356b8190353e8fc67ab368 | 809 | py | Python | parser/team07/Proyecto/clasesAbstractas/expresion.py | susanliss/tytus | a613a2352cf4a1d0e90ce27bb346ab60ed8039cc | [
"MIT"
] | null | null | null | parser/team07/Proyecto/clasesAbstractas/expresion.py | susanliss/tytus | a613a2352cf4a1d0e90ce27bb346ab60ed8039cc | [
"MIT"
] | null | null | null | parser/team07/Proyecto/clasesAbstractas/expresion.py | susanliss/tytus | a613a2352cf4a1d0e90ce27bb346ab60ed8039cc | [
"MIT"
] | null | null | null | from .instruccionAbstracta import InstruccionAbstracta
| 23.794118 | 67 | 0.651422 |
eeef030e3640987cf35e25ed5365b60fde947fe0 | 2,963 | py | Python | src/gluonts/nursery/tsbench/src/tsbench/evaluations/metrics/performance.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | 1 | 2022-03-28T01:17:00.000Z | 2022-03-28T01:17:00.000Z | src/gluonts/nursery/tsbench/src/tsbench/evaluations/metrics/performance.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | src/gluonts/nursery/tsbench/src/tsbench/evaluations/metrics/performance.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import annotations
from dataclasses import dataclass
from typing import cast, Dict, List, Union
import numpy as np
import pandas as pd
from .metric import Metric
| 31.521277 | 98 | 0.59433 |
eef0f0b4303286161e71367939209bbe2bdf9cf9 | 1,940 | py | Python | Scripts/simulation/postures/posture_tunables.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/postures/posture_tunables.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/postures/posture_tunables.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\postures\posture_tunables.py
# Compiled at: 2016-02-19 01:17:07
# Size of source mod 2**32: 2003 bytes
from postures.posture_cost import TunablePostureCostVariant
from postures.posture_validators import TunablePostureValidatorVariant
from sims4.tuning.tunable import OptionalTunable, TunableTuple, TunableList | 114.117647 | 1,122 | 0.635052 |
eef0f57e2e52d98324d6736af1814a7fec12251f | 23 | py | Python | Game/History/__init__.py | ritwikd/interom | 0b626351fd742f2a99d0a6d11ba8c1a214aab576 | [
"MIT"
] | null | null | null | Game/History/__init__.py | ritwikd/interom | 0b626351fd742f2a99d0a6d11ba8c1a214aab576 | [
"MIT"
] | 1 | 2021-03-06T22:08:32.000Z | 2021-03-06T22:09:07.000Z | Game/History/__init__.py | ritwikd/interom | 0b626351fd742f2a99d0a6d11ba8c1a214aab576 | [
"MIT"
] | 1 | 2021-03-03T22:48:07.000Z | 2021-03-03T22:48:07.000Z | from . import Log, Move | 23 | 23 | 0.73913 |
eef278f2f4e2c217a17b9bdf16a63771a1fe90a6 | 107 | py | Python | Guitarist.py | Stanels42/pythonic-garage-band | 7dfdec84073720998368cc2042bed011244c88ae | [
"MIT"
] | 1 | 2021-10-01T09:48:42.000Z | 2021-10-01T09:48:42.000Z | Guitarist.py | Stanels42/pythonic-garage-band | 7dfdec84073720998368cc2042bed011244c88ae | [
"MIT"
] | 1 | 2019-12-06T04:22:11.000Z | 2019-12-06T04:22:11.000Z | Guitarist.py | Stanels42/pythonic-garage-band | 7dfdec84073720998368cc2042bed011244c88ae | [
"MIT"
] | 1 | 2019-12-06T19:39:55.000Z | 2019-12-06T19:39:55.000Z | from Musician import Musician
| 17.833333 | 29 | 0.747664 |
eef62d1ce6768e7a68a4a1159bbd33491dcbc7e8 | 6,126 | py | Python | tests/objects/fiber_manipulation_test.py | jifengting1/fastpliFork | 1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1 | [
"MIT"
] | null | null | null | tests/objects/fiber_manipulation_test.py | jifengting1/fastpliFork | 1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1 | [
"MIT"
] | null | null | null | tests/objects/fiber_manipulation_test.py | jifengting1/fastpliFork | 1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import fastpli.objects
import fastpli.tools
if __name__ == '__main__':
unittest.main()
| 42.839161 | 79 | 0.538851 |
eef6f9b0de74e501a4d4981b8350d4bf8e08d58a | 4,403 | py | Python | kerascv/layers/matchers/argmax_matcher.py | tanzhenyu/keras-cv | b7208ee25735c492ccc171874e34076111dcf637 | [
"Apache-2.0"
] | null | null | null | kerascv/layers/matchers/argmax_matcher.py | tanzhenyu/keras-cv | b7208ee25735c492ccc171874e34076111dcf637 | [
"Apache-2.0"
] | null | null | null | kerascv/layers/matchers/argmax_matcher.py | tanzhenyu/keras-cv | b7208ee25735c492ccc171874e34076111dcf637 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from kerascv.layers.iou_similarity import IOUSimilarity
iou_layer = IOUSimilarity()
| 39.666667 | 87 | 0.661821 |
eef840e020a73705ee971a6562f13c86679b8ac7 | 538 | py | Python | Atv1-Distribuida/servidorBackup.py | rodolfotr/Computacao_Distribuida | 1d9db06ef4ab7290a6ce9666b5cb83987cc74e9d | [
"MIT"
] | null | null | null | Atv1-Distribuida/servidorBackup.py | rodolfotr/Computacao_Distribuida | 1d9db06ef4ab7290a6ce9666b5cb83987cc74e9d | [
"MIT"
] | null | null | null | Atv1-Distribuida/servidorBackup.py | rodolfotr/Computacao_Distribuida | 1d9db06ef4ab7290a6ce9666b5cb83987cc74e9d | [
"MIT"
] | null | null | null | import socket
import struct
IP_BACKUP = '127.0.0.1'
PORTA_BACKUP = 5000
ARQUIVO_BACKUP = "/home/aluno-uffs/Documentos/Trab_Final/Atv1-Distribuida/cliente_BACKUP.c"
#Recebe o arquivo.
sockReceber = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sockReceber.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sockReceber.bind((IP_BACKUP, PORTA_BACKUP))
while (True):
l = sockReceber.recv(1561651651)
if (l):
f = open(ARQUIVO_BACKUP,'wb')
f.write(l)
f.close()
sockReceber.close() | 25.619048 | 91 | 0.734201 |
eef8835ed3e8db9f839217b35bfd1e4b67953a9b | 634 | py | Python | examples/example_regression.py | QuantLet/localpoly | 7db50e3fb2caf39af8f9db1f2108fd1a81fc51bc | [
"MIT"
] | 1 | 2021-04-28T09:39:53.000Z | 2021-04-28T09:39:53.000Z | examples/example_regression.py | QuantLet/localpoly | 7db50e3fb2caf39af8f9db1f2108fd1a81fc51bc | [
"MIT"
] | null | null | null | examples/example_regression.py | QuantLet/localpoly | 7db50e3fb2caf39af8f9db1f2108fd1a81fc51bc | [
"MIT"
] | 1 | 2021-05-11T19:06:17.000Z | 2021-05-11T19:06:17.000Z | import numpy as np
from matplotlib import pyplot as plt
from localpoly.base import LocalPolynomialRegression
# simulate data
np.random.seed(1)
X = np.linspace(-np.pi, np.pi, num=150)
y_real = np.sin(X)
y = np.random.normal(0, 0.3, len(X)) + y_real
# local polynomial regression
model = LocalPolynomialRegression(X=X, y=y, h=0.8469, kernel="gaussian", gridsize=100)
prediction_interval = (X.min(), X.max())
results = model.fit(prediction_interval)
# plot
plt.scatter(X, y)
plt.plot(X, y_real, "grey", ls="--", alpha=0.5, label="function")
plt.plot(results["X"], results["fit"], "r", alpha=0.9, label="fit")
plt.legend()
plt.show()
| 27.565217 | 86 | 0.706625 |
eefc2e95d04d1e10619a3cb3fe8a472e3a76f13a | 690 | py | Python | mint/modules/activations.py | remicongee/Mint | 0f2db9b4216d8e61ec6b6892fd5baf962847581c | [
"MIT"
] | null | null | null | mint/modules/activations.py | remicongee/Mint | 0f2db9b4216d8e61ec6b6892fd5baf962847581c | [
"MIT"
] | null | null | null | mint/modules/activations.py | remicongee/Mint | 0f2db9b4216d8e61ec6b6892fd5baf962847581c | [
"MIT"
] | 1 | 2020-12-02T09:02:55.000Z | 2020-12-02T09:02:55.000Z | ## Activation functions
from .module import Module
from ..utils import functional as F | 21.5625 | 49 | 0.592754 |
eefc3d409d2d8b66094f301c43a67fdc4a9f6792 | 2,829 | py | Python | utils/phase0/state_transition.py | hwwhww/eth2.0-specs | 729757d4279db4535b176361d67d1567c0df314b | [
"CC0-1.0"
] | 3 | 2020-07-22T14:51:07.000Z | 2022-01-02T12:02:45.000Z | utils/phase0/state_transition.py | hwwhww/eth2.0-specs | 729757d4279db4535b176361d67d1567c0df314b | [
"CC0-1.0"
] | null | null | null | utils/phase0/state_transition.py | hwwhww/eth2.0-specs | 729757d4279db4535b176361d67d1567c0df314b | [
"CC0-1.0"
] | null | null | null | from . import spec
from typing import ( # noqa: F401
Any,
Callable,
List,
NewType,
Tuple,
)
from .spec import (
BeaconState,
BeaconBlock,
)
| 28.009901 | 80 | 0.653588 |
eefc51b8229cb41587ef71a58d9e82472148716d | 1,419 | py | Python | greatbigcrane/buildout_manage/recipes/mercurial.py | pnomolos/greatbigcrane | db0763706e1e8ca1f2bd769aa79c99681f1a967e | [
"Apache-2.0"
] | 3 | 2015-11-19T21:35:22.000Z | 2016-07-17T18:07:07.000Z | greatbigcrane/buildout_manage/recipes/mercurial.py | pnomolos/greatbigcrane | db0763706e1e8ca1f2bd769aa79c99681f1a967e | [
"Apache-2.0"
] | null | null | null | greatbigcrane/buildout_manage/recipes/mercurial.py | pnomolos/greatbigcrane | db0763706e1e8ca1f2bd769aa79c99681f1a967e | [
"Apache-2.0"
] | null | null | null | """
Copyright 2010 Jason Chu, Dusty Phillips, and Phil Schalm
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from buildout_manage.recipetools import simple_property, bool_property
| 33 | 72 | 0.725863 |
eefdcd3ea1af6682c969002f242acba638c23ea1 | 799 | py | Python | e2e/codebuild/results_comment.py | hixio-mh/taskcat | a5d23a4b05592250c2ec0304d77571675628b00d | [
"Apache-2.0"
] | 920 | 2016-12-03T01:41:25.000Z | 2021-11-04T13:52:21.000Z | e2e/codebuild/results_comment.py | hixio-mh/taskcat | a5d23a4b05592250c2ec0304d77571675628b00d | [
"Apache-2.0"
] | 544 | 2017-02-23T22:41:25.000Z | 2021-11-03T23:02:25.000Z | e2e/codebuild/results_comment.py | hixio-mh/taskcat | a5d23a4b05592250c2ec0304d77571675628b00d | [
"Apache-2.0"
] | 225 | 2016-12-11T13:36:21.000Z | 2021-11-04T14:43:53.000Z | import os
import sys
import boto3
from github import Github
SSM_CLIENT = boto3.client("ssm")
GITHUB_REPO_NAME = os.environ.get("GITHUB_REPO_NAME", "")
PR_NUMBER = os.environ.get("PR_NUMBER", "")
FAILED = bool(int(sys.argv[2]))
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN", "")
if __name__ == "__main__":
repo = Github(GITHUB_TOKEN).get_repo(GITHUB_REPO_NAME)
pr = repo.get_pull(int(PR_NUMBER))
message, event = ("end to end tests failed", "REQUEST_CHANGES")
if not FAILED:
message, event = ("end to end tests passed\n", "APPROVE")
with open("../../cov_report", "r") as fh:
cov = fh.read().replace(f"/{GITHUB_REPO_NAME}/", "")
message += f"```{cov}```"
pr.create_review(body=message, event=event, commit=repo.get_commit(sys.argv[1]))
| 30.730769 | 84 | 0.653317 |
eefe78a5c5393bb02f57187df46d42fbd870dd68 | 2,460 | py | Python | openghg/client/_search.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | 5 | 2021-03-02T09:04:07.000Z | 2022-01-25T09:58:16.000Z | openghg/client/_search.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | 229 | 2020-09-30T15:08:39.000Z | 2022-03-31T14:23:55.000Z | openghg/client/_search.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from Acquire.Client import Wallet
if TYPE_CHECKING:
from openghg.dataobjects import SearchResults
__all__ = ["Search"]
| 29.638554 | 96 | 0.597561 |
e1003c20209106cf6d3e01c2eabbb6012b595686 | 1,524 | py | Python | ikats/client/opentsdb_stub.py | IKATS/ikats_api | 86f965e9ea83fde1fb64f187b294d383d267f77f | [
"Apache-2.0"
] | null | null | null | ikats/client/opentsdb_stub.py | IKATS/ikats_api | 86f965e9ea83fde1fb64f187b294d383d267f77f | [
"Apache-2.0"
] | null | null | null | ikats/client/opentsdb_stub.py | IKATS/ikats_api | 86f965e9ea83fde1fb64f187b294d383d267f77f | [
"Apache-2.0"
] | 1 | 2020-01-27T14:44:27.000Z | 2020-01-27T14:44:27.000Z | # -*- coding: utf-8 -*-
"""
Copyright 2019 CS Systmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from ikats.client.opentsdb_client import OpenTSDBClient
| 27.709091 | 81 | 0.694226 |
e101989a4d6808941cf59d0b6ca5c8dec9a09fac | 4,467 | py | Python | models/seeding/base.py | Sanzeed/balanced_influence_maximization | 0797b8a8f536cac8023e128ab13eb532f902bcad | [
"MIT"
] | 4 | 2021-06-03T02:53:46.000Z | 2022-01-25T07:07:08.000Z | models/seeding/base.py | Sanzeed/balanced_influence_maximization | 0797b8a8f536cac8023e128ab13eb532f902bcad | [
"MIT"
] | null | null | null | models/seeding/base.py | Sanzeed/balanced_influence_maximization | 0797b8a8f536cac8023e128ab13eb532f902bcad | [
"MIT"
] | 1 | 2021-06-17T02:17:22.000Z | 2021-06-17T02:17:22.000Z | import numpy as np
from scipy.stats import bernoulli
import heapq
| 44.227723 | 120 | 0.591896 |
e102bdd6852dce95483c7c8cdb3211b3d9ab7231 | 43 | py | Python | run_5395.py | mpi3d/goodix-fp-dump | 039940845bd5eeb98cd92d72f267e3be77feb156 | [
"MIT"
] | 136 | 2021-05-05T14:16:17.000Z | 2022-03-31T09:04:18.000Z | run_5395.py | tsunekotakimoto/goodix-fp-dump | b88ecbababd3766314521fe30ee943c4bd1810df | [
"MIT"
] | 14 | 2021-08-20T09:49:39.000Z | 2022-03-20T13:18:05.000Z | run_5395.py | tsunekotakimoto/goodix-fp-dump | b88ecbababd3766314521fe30ee943c4bd1810df | [
"MIT"
] | 11 | 2021-08-02T15:49:11.000Z | 2022-02-06T22:06:42.000Z | from driver_53x5 import main
main(0x5395)
| 10.75 | 28 | 0.813953 |
e10338cc76f582f3f2a03b933dc6086137bca50f | 7,104 | py | Python | v2ray/com/core/proxy/vmess/inbound/config_pb2.py | xieruan/v2bp | 350b2f80d3a06494ed4092945804c1c851fdf1db | [
"MIT"
] | 7 | 2020-06-24T07:15:15.000Z | 2022-03-08T16:36:09.000Z | v2ray/com/core/proxy/vmess/inbound/config_pb2.py | xieruan/vp | 350b2f80d3a06494ed4092945804c1c851fdf1db | [
"MIT"
] | null | null | null | v2ray/com/core/proxy/vmess/inbound/config_pb2.py | xieruan/vp | 350b2f80d3a06494ed4092945804c1c851fdf1db | [
"MIT"
] | 6 | 2020-07-06T06:51:20.000Z | 2021-03-23T06:26:36.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v2ray.com/core/proxy/vmess/inbound/config.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from v2ray.com.core.common.protocol import user_pb2 as v2ray_dot_com_dot_core_dot_common_dot_protocol_dot_user__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v2ray.com/core/proxy/vmess/inbound/config.proto',
package='v2ray.core.proxy.vmess.inbound',
syntax='proto3',
serialized_options=b'\n\"com.v2ray.core.proxy.vmess.inboundP\001Z\007inbound\252\002\036V2Ray.Core.Proxy.Vmess.Inbound',
serialized_pb=b'\n/v2ray.com/core/proxy/vmess/inbound/config.proto\x12\x1ev2ray.core.proxy.vmess.inbound\x1a)v2ray.com/core/common/protocol/user.proto\"\x1a\n\x0c\x44\x65tourConfig\x12\n\n\x02to\x18\x01 \x01(\t\"0\n\rDefaultConfig\x12\x10\n\x08\x61lter_id\x18\x01 \x01(\r\x12\r\n\x05level\x18\x02 \x01(\r\"\xd6\x01\n\x06\x43onfig\x12.\n\x04user\x18\x01 \x03(\x0b\x32 .v2ray.core.common.protocol.User\x12>\n\x07\x64\x65\x66\x61ult\x18\x02 \x01(\x0b\x32-.v2ray.core.proxy.vmess.inbound.DefaultConfig\x12<\n\x06\x64\x65tour\x18\x03 \x01(\x0b\x32,.v2ray.core.proxy.vmess.inbound.DetourConfig\x12\x1e\n\x16secure_encryption_only\x18\x04 \x01(\x08\x42P\n\"com.v2ray.core.proxy.vmess.inboundP\x01Z\x07inbound\xaa\x02\x1eV2Ray.Core.Proxy.Vmess.Inboundb\x06proto3'
,
dependencies=[v2ray_dot_com_dot_core_dot_common_dot_protocol_dot_user__pb2.DESCRIPTOR,])
_DETOURCONFIG = _descriptor.Descriptor(
name='DetourConfig',
full_name='v2ray.core.proxy.vmess.inbound.DetourConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='to', full_name='v2ray.core.proxy.vmess.inbound.DetourConfig.to', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=126,
serialized_end=152,
)
_DEFAULTCONFIG = _descriptor.Descriptor(
name='DefaultConfig',
full_name='v2ray.core.proxy.vmess.inbound.DefaultConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='alter_id', full_name='v2ray.core.proxy.vmess.inbound.DefaultConfig.alter_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='v2ray.core.proxy.vmess.inbound.DefaultConfig.level', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=154,
serialized_end=202,
)
_CONFIG = _descriptor.Descriptor(
name='Config',
full_name='v2ray.core.proxy.vmess.inbound.Config',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user', full_name='v2ray.core.proxy.vmess.inbound.Config.user', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default', full_name='v2ray.core.proxy.vmess.inbound.Config.default', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detour', full_name='v2ray.core.proxy.vmess.inbound.Config.detour', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secure_encryption_only', full_name='v2ray.core.proxy.vmess.inbound.Config.secure_encryption_only', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=205,
serialized_end=419,
)
_CONFIG.fields_by_name['user'].message_type = v2ray_dot_com_dot_core_dot_common_dot_protocol_dot_user__pb2._USER
_CONFIG.fields_by_name['default'].message_type = _DEFAULTCONFIG
_CONFIG.fields_by_name['detour'].message_type = _DETOURCONFIG
DESCRIPTOR.message_types_by_name['DetourConfig'] = _DETOURCONFIG
DESCRIPTOR.message_types_by_name['DefaultConfig'] = _DEFAULTCONFIG
DESCRIPTOR.message_types_by_name['Config'] = _CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DetourConfig = _reflection.GeneratedProtocolMessageType('DetourConfig', (_message.Message,), {
'DESCRIPTOR' : _DETOURCONFIG,
'__module__' : 'v2ray.com.core.proxy.vmess.inbound.config_pb2'
# @@protoc_insertion_point(class_scope:v2ray.core.proxy.vmess.inbound.DetourConfig)
})
_sym_db.RegisterMessage(DetourConfig)
DefaultConfig = _reflection.GeneratedProtocolMessageType('DefaultConfig', (_message.Message,), {
'DESCRIPTOR' : _DEFAULTCONFIG,
'__module__' : 'v2ray.com.core.proxy.vmess.inbound.config_pb2'
# @@protoc_insertion_point(class_scope:v2ray.core.proxy.vmess.inbound.DefaultConfig)
})
_sym_db.RegisterMessage(DefaultConfig)
Config = _reflection.GeneratedProtocolMessageType('Config', (_message.Message,), {
'DESCRIPTOR' : _CONFIG,
'__module__' : 'v2ray.com.core.proxy.vmess.inbound.config_pb2'
# @@protoc_insertion_point(class_scope:v2ray.core.proxy.vmess.inbound.Config)
})
_sym_db.RegisterMessage(Config)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.248619 | 757 | 0.758164 |
e103652358a900837a67abd9fbc1244e03d12a88 | 2,631 | py | Python | RedditReader/redditReader.py | Semicolon42/PythonProjects | eb6ec5d13594013a2703af43eb0d1c29406faaad | [
"Apache-2.0"
] | null | null | null | RedditReader/redditReader.py | Semicolon42/PythonProjects | eb6ec5d13594013a2703af43eb0d1c29406faaad | [
"Apache-2.0"
] | null | null | null | RedditReader/redditReader.py | Semicolon42/PythonProjects | eb6ec5d13594013a2703af43eb0d1c29406faaad | [
"Apache-2.0"
] | null | null | null | import logging
import csv
import time
from bs4 import BeautifulSoup
import requests
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO)
if __name__ == '__main__':
print('start up')
main()
print('all done')
| 30.241379 | 79 | 0.542379 |
e10557e7b3374a814dff92034c545370c1354b22 | 2,605 | py | Python | asteroid/repl.py | asteroid-lang/asteroid | 537c60dd639e4f83fdefff4d36e1d63c3b4139a4 | [
"MIT"
] | 2 | 2022-02-09T20:33:05.000Z | 2022-02-09T20:33:08.000Z | asteroid/repl.py | asteroid-lang/asteroid | 537c60dd639e4f83fdefff4d36e1d63c3b4139a4 | [
"MIT"
] | 40 | 2022-01-22T02:29:51.000Z | 2022-03-31T14:45:31.000Z | asteroid/repl.py | asteroid-lang/asteroid | 537c60dd639e4f83fdefff4d36e1d63c3b4139a4 | [
"MIT"
] | 2 | 2022-01-20T18:20:11.000Z | 2022-02-12T22:35:22.000Z | from asteroid.interp import interp
from asteroid.version import VERSION
from asteroid.state import state
from asteroid.globals import ExpectationError
from asteroid.walk import function_return_value
from asteroid.support import term2string
from sys import stdin
import readline
| 26.581633 | 83 | 0.554702 |
e106417c74eb34df2f46cb1cc4d7afaf1c61501e | 1,762 | py | Python | apis/file_state.py | brockpalen/ltfsee-globus | 5cb322ef09cd4f883951de96e5cb242f876ccd9c | [
"MIT"
] | null | null | null | apis/file_state.py | brockpalen/ltfsee-globus | 5cb322ef09cd4f883951de96e5cb242f876ccd9c | [
"MIT"
] | null | null | null | apis/file_state.py | brockpalen/ltfsee-globus | 5cb322ef09cd4f883951de96e5cb242f876ccd9c | [
"MIT"
] | null | null | null | """API for eeadm file state."""
from http import HTTPStatus
from flask import request
from flask_restx import Namespace, Resource, fields
from core.eeadm.file_state import EEADM_File_State
from ltfsee_globus.auth import token_required
api = Namespace(
"file_state", description="Get state of a file in archive eeadm file state"
)
# model for returning data from eeadm file state -s
# https://www.ibm.com/support/knowledgecenter/ST9MBR_1.3.0/ee_eeadm_file_state_command_output.html
file_state_model = api.model(
"file_state",
{
"state": fields.String,
"replicas": fields.Integer,
"tapes": fields.List(fields.String),
"path": fields.String,
},
)
# model for the input of a file
# must be abolute path
file_model = api.model("file", {"path": fields.String})
# create the API
| 32.036364 | 100 | 0.713394 |
e1068254019048e1b19e7e8d94638f8a3b8808de | 1,350 | py | Python | src/helpers/fix_test_data_for_roc.py | Iretha/IoT23-network-traffic-anomalies-classification | 93c157589e8128e8d9d5091d93052b18cd3ac35d | [
"MIT"
] | 9 | 2021-04-07T18:16:54.000Z | 2021-12-08T16:49:03.000Z | src/helpers/fix_test_data_for_roc.py | Iretha/IoT-23-anomaly-detection | 93c157589e8128e8d9d5091d93052b18cd3ac35d | [
"MIT"
] | 2 | 2021-09-02T03:52:03.000Z | 2021-11-15T11:32:55.000Z | src/helpers/fix_test_data_for_roc.py | Iretha/IoT23-network-traffic-anomalies-classification | 93c157589e8128e8d9d5091d93052b18cd3ac35d | [
"MIT"
] | null | null | null | from numpy import sort
from src.helpers.dataframe_helper import df_get, write_to_csv
def __copy_random_record_of_class(from_df, from_file_path, to_df, to_file_path, classes=None):
"""
TODO if we want to be more precise, we have to move the row, not just copy it
"""
if classes is None or len(classes) == 0:
return
print('Missing classes: ' + str(classes) + ' in ' + to_file_path)
cnt = 0
for clz in classes:
sample_df = from_df[from_df['detailed-label'] == clz].sample(1)
to_df = to_df.append(sample_df)
cnt += 1
if cnt > 0:
write_to_csv(to_df, to_file_path, mode='w')
| 34.615385 | 110 | 0.718519 |
e106968b5aabed3c4faf9536ea2f316b06ae7ec9 | 7,925 | py | Python | 130_html_to_csv/150_mkcsv_t_info_d.py | takobouzu/BOAT_RACE_DB | f16ed8f55aef567c0ecc6ebd3ad0e917f5c600d8 | [
"MIT"
] | 6 | 2020-12-23T01:06:04.000Z | 2022-01-12T10:18:36.000Z | 130_html_to_csv/150_mkcsv_t_info_d.py | takobouzu/BOAT_RACE_DB | f16ed8f55aef567c0ecc6ebd3ad0e917f5c600d8 | [
"MIT"
] | 15 | 2021-03-02T05:59:24.000Z | 2021-09-12T08:12:38.000Z | 130_html_to_csv/150_mkcsv_t_info_d.py | takobouzu/BOAT_RACE_DB | f16ed8f55aef567c0ecc6ebd3ad0e917f5c600d8 | [
"MIT"
] | 1 | 2021-05-09T10:47:21.000Z | 2021-05-09T10:47:21.000Z | '''
BOAT_RACE_DB2
140_mkcsv_t_info_d.py
HTMLt_info_dCSV
macOS 11.1/Raspbian OS 10.4/python 3.9.1/sqlite3 3.32.3
2021.02.01 ver 1.00
'''
import os
import datetime
from bs4 import BeautifulSoup
#
BASE_DIR = '/home/pi/BOAT_RACE_DB'
'''
mkcsv_t_info_d
HTMLt_info_dCSV
'''
#
mkcsv_t_info_d() #t_info_dCSV
| 48.323171 | 120 | 0.418801 |
e1071b566b934eed8eaf574357b76325acbfe989 | 174 | py | Python | python/show_nmc.py | Typas/Data-Assimilation-Project | 4b880c7faadf778d891ffab77ebfbde1db5c5baf | [
"MIT"
] | null | null | null | python/show_nmc.py | Typas/Data-Assimilation-Project | 4b880c7faadf778d891ffab77ebfbde1db5c5baf | [
"MIT"
] | null | null | null | python/show_nmc.py | Typas/Data-Assimilation-Project | 4b880c7faadf778d891ffab77ebfbde1db5c5baf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
B = np.reshape(np.genfromtxt("data/b_nmc.txt"), (40, 40))
import matplotlib.pyplot as plt
plt.contourf(B)
plt.colorbar()
plt.show()
| 21.75 | 57 | 0.724138 |
e10776844de6cd61363f91f2091e32c884366312 | 602 | py | Python | hello.py | QuocTrungTran/cgi-lab | fa79815b0e0ebd3d925e4d30043f2536ef2d9b4f | [
"Apache-2.0"
] | null | null | null | hello.py | QuocTrungTran/cgi-lab | fa79815b0e0ebd3d925e4d30043f2536ef2d9b4f | [
"Apache-2.0"
] | null | null | null | hello.py | QuocTrungTran/cgi-lab | fa79815b0e0ebd3d925e4d30043f2536ef2d9b4f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os, json
print("Content-type:text/html\r\n\r\n")
print
print("<title>Test CGI</title>")
print("<p>Hello World!</>")
# #Q1
# print(os.environ)
# json_object = json.dumps(dict(os.environ), indent=4)
# #print(json_object)
#Q2
# for param in os.environ.keys():
# if (param=="QUERY_STRING"):
# #print(f"<em>{param}</em> = {os.environ[param]}</li>")
# print("<b>%20s</b>: %s<br>" % (param, os.environ[param]))
# #Q3
# for param in os.environ.keys():
# if (param=="HTTP_USER_AGENT"):
# print("<b>%20s</b>: %s<br>" % (param, os.environ[param])) | 26.173913 | 67 | 0.593023 |
e1088f7eca5eb9b2a0d3d520b6c9dd794d84bb1c | 2,194 | py | Python | onetabtobear.py | vinceblake/saveTabToBear | 4b3a79c06e9130c95fa1f87b30999f2fbfe2e017 | [
"MIT"
] | null | null | null | onetabtobear.py | vinceblake/saveTabToBear | 4b3a79c06e9130c95fa1f87b30999f2fbfe2e017 | [
"MIT"
] | null | null | null | onetabtobear.py | vinceblake/saveTabToBear | 4b3a79c06e9130c95fa1f87b30999f2fbfe2e017 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
from subprocess import Popen, PIPE
from urllib.parse import quote
import sqlite3, datetime, sys, re
# Global Variables
removeCheckedItems = True # Set to false if you want to keep "completed" to-do items when this is run
bearDbFile = str(sys.argv[3])
oneTabID = str(sys.argv[4])
# Methods
def create_connection(db_file): # Establish SQLITE database connection cursor
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except:
print("Failed to establish connection")
return None
return conn
# Main functionality:
if __name__ == '__main__':
title = sys.argv[1]
url = sys.argv[2]
# Connect to Bear database
beardb = create_connection(bearDbFile)
bear = beardb.cursor()
# Process tab and update database:
updateOneTab() | 30.054795 | 115 | 0.639927 |
e1093ea692aa40b78e1fe9867c9ec44b0222ae19 | 1,319 | py | Python | defects4cpp/d++.py | HansolChoe/defects4cpp | cb9e3db239c50e6ec38127cec117865f0ee7a5cf | [
"MIT"
] | 10 | 2021-06-23T01:53:19.000Z | 2022-03-31T03:14:01.000Z | defects4cpp/d++.py | HansolChoe/defects4cpp | cb9e3db239c50e6ec38127cec117865f0ee7a5cf | [
"MIT"
] | 34 | 2021-05-27T01:09:04.000Z | 2022-03-28T07:53:35.000Z | defects4cpp/d++.py | HansolChoe/defects4cpp | cb9e3db239c50e6ec38127cec117865f0ee7a5cf | [
"MIT"
] | 6 | 2021-09-03T07:16:56.000Z | 2022-03-29T07:30:35.000Z | import sys
from time import perf_counter
from command import CommandList
from errors import DppArgparseError, DppDockerError, DppError
from message import message
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
main()
| 24.425926 | 79 | 0.639121 |
e109e7b0486674fec7a7133e0f5ef96b64e2f7e2 | 9,962 | py | Python | wz/ui/choice_grid.py | gradgrind/WZ | 672d93a3c9d7806194d16d6d5b9175e4046bd068 | [
"Apache-2.0"
] | null | null | null | wz/ui/choice_grid.py | gradgrind/WZ | 672d93a3c9d7806194d16d6d5b9175e4046bd068 | [
"Apache-2.0"
] | null | null | null | wz/ui/choice_grid.py | gradgrind/WZ | 672d93a3c9d7806194d16d6d5b9175e4046bd068 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
ui/choice_grid.py
Last updated: 2021-05-04
Manage the grid for the puil-subject-choice-editor.
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
### Display texts
_PUPIL = "Schler"
_GROUPS = "Gruppen"
## Measurements are in mm ##
_SEP_SIZE = 1
_HEIGHT_LINE = 6
_WIDTH_TOGGLE = 8
COLUMNS = (35, 15, 15, _SEP_SIZE) # + ...
ROWS = (
#title
12,
# info rows
_HEIGHT_LINE, _HEIGHT_LINE, _HEIGHT_LINE, _HEIGHT_LINE,
_HEIGHT_LINE, _HEIGHT_LINE,
# header (tags)
_HEIGHT_LINE, _SEP_SIZE
) # + _HEIGHT_LINE * n
# Content of marked toggle-cells
MARK = 'X'
#####################################################
from qtpy.QtWidgets import QApplication
from qtpy.QtGui import QColor, QBrush
from qtpy.QtCore import Qt
from ui.gridbase import GridBase
| 36.490842 | 76 | 0.511443 |
e10a689e78f45e04945a350aa7275406f0c3d7c2 | 72 | py | Python | numberstest.py | dreadnaught-ETES/school | 9faa2b6379db8f819872b8597896f5291812c5d6 | [
"CC0-1.0"
] | null | null | null | numberstest.py | dreadnaught-ETES/school | 9faa2b6379db8f819872b8597896f5291812c5d6 | [
"CC0-1.0"
] | null | null | null | numberstest.py | dreadnaught-ETES/school | 9faa2b6379db8f819872b8597896f5291812c5d6 | [
"CC0-1.0"
] | null | null | null | import math
result=(math.pow(3,2)+1)*(math.fmod(16,7))/7
print(result) | 24 | 45 | 0.680556 |
e10b54355c9e418ed2013419152b910332c40ec9 | 5,585 | py | Python | EPH_CORE_SkyObjectMgr.py | polsterc16/ephem | 70ac6c079c80344b83499b96edaff57fb5881efc | [
"MIT"
] | null | null | null | EPH_CORE_SkyObjectMgr.py | polsterc16/ephem | 70ac6c079c80344b83499b96edaff57fb5881efc | [
"MIT"
] | null | null | null | EPH_CORE_SkyObjectMgr.py | polsterc16/ephem | 70ac6c079c80344b83499b96edaff57fb5881efc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 16:25:45 2019
@author: polsterc16
==============================================================================
LICENCE INFORMATION
==============================================================================
This Software uses Code (spg4) provided by "Brandon Rhodes" under
the "MIT License". For more Information see "licence-info.txt".
Diese Software benutzt Code (spg4), welcher von "Brandon Rhodes" unter
der "MIT License" zur Verfuegung gestellt wird. Fuer weitere Information
siehe "licence-info.txt".
==============================================================================
"""
import EPH_CORE_TimeSpaceMgr as TSMgr
import EPH_SAT_SatelliteMgr as SatMgr
import EPH_PLANET_PlanetMgr as PlanetMgr
import EPH_STAR_StarMgr as StarMgr
import EPH_MOON_MoonMgr as MoonMgr
| 34.263804 | 78 | 0.476813 |
e10cde8f79b9f8a7e8e8be18b130895124b76c09 | 3,370 | py | Python | integration-test-reports/run_reports.py | sutoiku/autostat | b0e6588e587450c4cbdb19a021d847f7571ba466 | [
"MIT"
] | null | null | null | integration-test-reports/run_reports.py | sutoiku/autostat | b0e6588e587450c4cbdb19a021d847f7571ba466 | [
"MIT"
] | 1 | 2022-03-16T19:05:46.000Z | 2022-03-16T19:05:46.000Z | integration-test-reports/run_reports.py | sutoiku/autostat | b0e6588e587450c4cbdb19a021d847f7571ba466 | [
"MIT"
] | 1 | 2021-07-14T19:37:44.000Z | 2021-07-14T19:37:44.000Z | from autostat.run_settings import RunSettings, Backend
from autostat.kernel_search import kernel_search, get_best_kernel_info
from autostat.dataset_adapters import Dataset
from autostat.utils.test_data_loader import load_test_dataset
from html_reports import Report
from markdown import markdown
import matplotlib.pyplot as plt
from datetime import datetime
import os
import time
import random
import numpy as np
print(os.getcwd())
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
report = Report()
logger = HtmlLogger(report)
matlab_data_path = "data/"
files_sorted_by_num_data_points = [
"01-airline.mat",
# "07-call-centre.mat",
# "08-radio.mat",
"04-wheat.mat",
# "02-solar.mat",
# "11-unemployment.mat",
# # "10-sulphuric.mat",
# # "09-gas-production.mat",
# "03-mauna.mat",
# # "13-wages.mat",
# # "06-internet.mat",
# "05-temperature.mat",
# "12-births.mat",
]
if __name__ == "__main__":
random.seed(1234)
np.random.seed(1234)
print("starting report")
run_settings = RunSettings(
max_search_depth=2,
expand_kernel_specs_as_sums=False,
num_cpus=12,
use_gpu=False,
use_parallel=True,
gpu_memory_share_needed=0.45,
backend=Backend.SKLEARN,
).replace_base_kernels_by_names(["PER", "LIN", "RBF"])
logger.print(str(run_settings))
logger.print("\n" + str(run_settings.asdict()))
prediction_scores = []
for file_name in files_sorted_by_num_data_points:
file_num = int(file_name[:2])
dataset = load_test_dataset(matlab_data_path, file_num, split=0.1)
run_settings = run_settings.replace_kernel_proto_constraints_using_dataset(
dataset
)
title_separator(f"Dataset: {file_name}")
tic = time.perf_counter()
kernel_scores = kernel_search(dataset, run_settings=run_settings, logger=logger)
toc = time.perf_counter()
best_kernel_info = get_best_kernel_info(kernel_scores)
prediction_scores.append(best_kernel_info.prediction_score)
logger.print(f"best_kernel_info {str(best_kernel_info)}")
logger.print(f"Total time for {file_name}: {toc-tic:.3f} s")
logger.prepend(f"prediction_scores: {str(prediction_scores)}")
logger.prepend(f"sum prediction_scores: {str(sum(prediction_scores))}")
report.write_report(filename=f"reports/report_{timestamp()}.html")
print("report done")
| 26.124031 | 88 | 0.651929 |
e10f737d8a704aff53053429254515a89ebf061b | 424 | py | Python | backend/apps/lyusers/urls.py | lybbn/django-vue3-lyadmin | df8ed48971eb3e3da977e1fd0467b1230b56afe4 | [
"MIT"
] | 1 | 2022-03-01T07:20:36.000Z | 2022-03-01T07:20:36.000Z | backend/apps/lyusers/urls.py | lybbn/django-vue3-lyadmin | df8ed48971eb3e3da977e1fd0467b1230b56afe4 | [
"MIT"
] | null | null | null | backend/apps/lyusers/urls.py | lybbn/django-vue3-lyadmin | df8ed48971eb3e3da977e1fd0467b1230b56afe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Remark:
"""
from django.urls import path, re_path
from rest_framework import routers
from apps.lyusers.views import UserManageViewSet
system_url = routers.SimpleRouter()
system_url.register(r'users', UserManageViewSet)
urlpatterns = [
re_path('users/disableuser/(?P<pk>.*?)/',UserManageViewSet.as_view({'put':'disableuser'}), name=''),
]
urlpatterns += system_url.urls | 21.2 | 110 | 0.731132 |
e1124f5104c7b2ddd81c1b4c389bcffa152ee3a4 | 44,393 | py | Python | srt_gc_launchGui.py | OrigamiAztec/LaunchGUITesting | e097afb075b313e13550937f450adf6653f88812 | [
"MIT"
] | null | null | null | srt_gc_launchGui.py | OrigamiAztec/LaunchGUITesting | e097afb075b313e13550937f450adf6653f88812 | [
"MIT"
] | null | null | null | srt_gc_launchGui.py | OrigamiAztec/LaunchGUITesting | e097afb075b313e13550937f450adf6653f88812 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Texas A&M University Sounding Rocketry Team
SRT-6 | 2018-2019
SRT-9 | 2021-2022
%-------------------------------------------------------------%
TAMU SRT
_____ __ _____ __ __
/ ___/______ __ _____ ___/ / / ___/__ ___ / /________ / /
/ (_ / __/ _ \/ // / _ \/ _ / / /__/ _ \/ _ \/ __/ __/ _ \/ /
\___/_/ \___/\_,_/_//_/\_,_/ \___/\___/_//_/\__/_/ \___/_/
%-------------------------------------------------------------%
Filepath:
gc/srt_gc_launchGui/srt_gc_launchGui.py
Developers:
(C) Doddanavar, Roshan 20171216
(L) Doddanavar, Roshan 20180801
Diaz, Antonio
Description:
Launch Control GUI, interfaces w/ srt_gc_launchArduino/srt_gc_launchArduino.ino
Input(s):
<None>
Output(s):
./log/*.log plain-text command log
./dat/*.dat plain-text data archive
'''
# Installed modules --> Utilities
import sys
import os
import serial, serial.tools.list_ports
from serial.serialutil import SerialException
import time
from datetime import datetime
import numpy as np
# Installed modules --> PyQt related
from PyQt5 import (QtGui, QtCore, QtSvg)
from PyQt5.QtCore import (Qt, QThread, pyqtSignal, QDate, QTime, QDateTime, QSize)
from PyQt5.QtWidgets import (QMainWindow, QWidget, QDesktopWidget, QPushButton, QApplication, QGroupBox, QGridLayout, QStatusBar, QFrame, QTabWidget,QComboBox)
import pyqtgraph as pg
# Program modules
from srt_gc_launchState import State
from srt_gc_launchThread import SerThread, UptimeThread
from srt_gc_launchTools import Tools, Object
from srt_gc_launchStyle import Style, Color
from srt_gc_launchConstr import Constr
# used to monitor wifi networks.
import subprocess
# used to get date and time in clock method.
import datetime as dt
# used to connect to ethernet socket in connect method.
import socket
# data for ethernet connection to SRT6 router
# Create a TCP/IP socket for srt router
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCP_IP = '192.168.1.177'
TCP_PORT = 23
server_address = (TCP_IP, TCP_PORT)
if (__name__ == '__main__'):
'''
Executive Control
'''
app = QApplication(sys.argv) # Utility for window exit condition
gui = Gui() # Creates instance of "Gui" class
sys.exit(app.exec_()) # Window exit condition | 39.530721 | 193 | 0.493974 |
e11508b726f072695da36af59f196eefb588d2a7 | 1,359 | py | Python | setup.py | charettes/cricket | ed3ef911e0776e225291a370220f0d9476afdd4e | [
"BSD-3-Clause"
] | 1 | 2015-11-06T07:51:04.000Z | 2015-11-06T07:51:04.000Z | setup.py | charettes/cricket | ed3ef911e0776e225291a370220f0d9476afdd4e | [
"BSD-3-Clause"
] | null | null | null | setup.py | charettes/cricket | ed3ef911e0776e225291a370220f0d9476afdd4e | [
"BSD-3-Clause"
] | null | null | null | #/usr/bin/env python
import sys
from setuptools import setup
from cricket import VERSION
try:
readme = open('README.rst')
long_description = str(readme.read())
finally:
readme.close()
required_pkgs = [
'tkreadonly',
]
if sys.version_info < (2, 7):
required_pkgs.extend(['argparse', 'unittest2', 'pyttk'])
setup(
name='cricket',
version=VERSION,
description='A graphical tool to assist running test suites.',
long_description=long_description,
author='Russell Keith-Magee',
author_email='russell@keith-magee.com',
url='http://pybee.org/cricket',
packages=[
'cricket',
'cricket.django',
'cricket.unittest',
],
install_requires=required_pkgs,
scripts=[],
entry_points={
'console_scripts': [
'cricket-django = cricket.django.__main__:main',
'cricket-unittest = cricket.unittest.__main__:main',
]
},
license='New BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Software Development',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
],
test_suite='tests'
)
| 25.641509 | 66 | 0.61663 |
e117f67f0c631749e3bd721fb7eedb16a22bb6f3 | 2,701 | py | Python | experimentation/tools/sorald/legacy/violation_scraper_apache_commons.py | gothius/sorald | 4c8761da495e528389c033660fae1f3c3a18cac3 | [
"MIT"
] | 49 | 2020-06-04T20:30:20.000Z | 2022-03-16T01:30:20.000Z | experimentation/tools/sorald/legacy/violation_scraper_apache_commons.py | gothius/sorald | 4c8761da495e528389c033660fae1f3c3a18cac3 | [
"MIT"
] | 551 | 2020-06-02T13:33:56.000Z | 2022-03-31T15:58:17.000Z | experimentation/tools/sorald/legacy/violation_scraper_apache_commons.py | gothius/sorald | 4c8761da495e528389c033660fae1f3c3a18cac3 | [
"MIT"
] | 12 | 2020-06-04T11:39:43.000Z | 2022-03-27T20:04:30.000Z | import requests;
import json;
from collections import Counter # Counts and orders the list of violations
import sys;
from urllib.parse import quote_plus # Make sysarg url-safe
# List of Apache Commons libraries which I know can be analyzed (without crashing/failing their tests)
commonsList = ["bcel",
"beanutils",
"cli",
"codec",
"collections",
"compress",
"configuration",
"crypto",
"csv",
"daemon",
"dbcp",
"dbutils",
"exec",
"fileupload",
"geometry",
"imaging",
"io",
"jexl",
"lang",
"logging",
"math",
"net",
"ognl",
"pool",
"scxml",
"statistics",
"text",
"validator",
"vfs"];
# Number of issues per page (Max 500)
pageSize = 500;
# Fill array with SQ violations. Keep making calls until all (up to 10000 since SQ doesn't support more) issues have been caught.
# Pretty prints a list, printing every object on its own line
if __name__ == "__main__":
main();
| 28.734043 | 157 | 0.640133 |
e11a8e425c834148530d1f4e74a6a8f4d690673a | 146 | py | Python | Curso Python/ex009.py | sandro-fidelis/Cursos | cee1960181b1309be93034694cab8cf2878e2194 | [
"MIT"
] | null | null | null | Curso Python/ex009.py | sandro-fidelis/Cursos | cee1960181b1309be93034694cab8cf2878e2194 | [
"MIT"
] | null | null | null | Curso Python/ex009.py | sandro-fidelis/Cursos | cee1960181b1309be93034694cab8cf2878e2194 | [
"MIT"
] | null | null | null | n = int(input('Qual tabuada deseja ver: '))
c=1
print(11*'=')
while c <= 10:
print('{} x {:2} = {}'.format(n,c,c*n))
c += 1
print(11*'=')
| 18.25 | 43 | 0.493151 |
e11b19ef6b4d98bab620857b523abf42ea96c9a9 | 8,782 | py | Python | train.py | genisplaja/tf-diffwave | 32b0b403e7ca157f015f9af9f7dcdfa79e312a6a | [
"MIT"
] | 23 | 2020-09-29T08:38:09.000Z | 2022-03-16T03:00:44.000Z | train.py | genisplaja/tf-diffwave | 32b0b403e7ca157f015f9af9f7dcdfa79e312a6a | [
"MIT"
] | 1 | 2020-10-03T08:36:48.000Z | 2020-10-03T08:36:48.000Z | train.py | genisplaja/tf-diffwave | 32b0b403e7ca157f015f9af9f7dcdfa79e312a6a | [
"MIT"
] | 7 | 2020-09-29T19:11:53.000Z | 2022-01-06T14:29:21.000Z | import argparse
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tqdm
from config import Config
from dataset import LJSpeech
from model import DiffWave
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', default=None)
parser.add_argument('--load-step', default=0, type=int)
parser.add_argument('--ir-unit', default=10, type=int)
parser.add_argument('--data-dir', default=None)
parser.add_argument('--download', default=False, action='store_true')
parser.add_argument('--from-raw', default=False, action='store_true')
args = parser.parse_args()
config = Config()
if args.config is not None:
print('[*] load config: ' + args.config)
with open(args.config) as f:
config = Config.load(json.load(f))
log_path = os.path.join(config.train.log, config.train.name)
if not os.path.exists(log_path):
os.makedirs(log_path)
ckpt_path = os.path.join(config.train.ckpt, config.train.name)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
lj = LJSpeech(config.data, args.data_dir, args.download, not args.from_raw)
diffwave = DiffWave(config.model)
trainer = Trainer(diffwave, lj, config)
if args.load_step > 0:
super_path = os.path.join(config.train.ckpt, config.train.name)
ckpt_path = '{}_{}.ckpt'.format(config.train.name, args.load_step)
ckpt_path = next(
name for name in os.listdir(super_path)
if name.startswith(ckpt_path) and name.endswith('.index'))
ckpt_path = os.path.join(super_path, ckpt_path[:-6])
print('[*] load checkpoint: ' + ckpt_path)
trainer.model.restore(ckpt_path, trainer.optim)
with open(os.path.join(config.train.ckpt, config.train.name + '.json'), 'w') as f:
json.dump(config.dump(), f)
trainer.train(args.load_step, args.ir_unit)
| 37.370213 | 86 | 0.535186 |
e11c13c2da24636a124e7f9a0bd4c8ced1cf20aa | 1,122 | py | Python | src/foxdot/research/boudoir/180617_1936_chuchotement_pantophobique.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | 4 | 2018-06-29T18:39:34.000Z | 2021-06-20T16:44:29.000Z | src/foxdot/research/boudoir/180617_1936_chuchotement_pantophobique.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | null | null | null | src/foxdot/research/boudoir/180617_1936_chuchotement_pantophobique.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | null | null | null | # boudoir - chuchotement pantophobique
# https://www.youtube.com/watch?v=KL2zW6Q5hWs
# https://gist.github.com/jf-parent/c8ea7e54e30593af01512f4e21b54670
Scale.default = Scale.major
Root.default = 0
Clock.bpm = 120
b1.reset() >> glass(
[0],
dur = 16,
).after(16, 'stop')
Clock.set_time(0)
Clock.future(0, play1)
Clock.future(30, play2)
Clock.future(60, play3)
Clock.future(120, play3)
Clock.future(240, play3)
| 20.4 | 68 | 0.496435 |
e1215b8a95ad1e693c4f500b1993173740393e02 | 14,101 | py | Python | cogs/fun.py | Der-Eddy/discord_bot | bc2511e6d030ee2e099410bd846ea871fe3f109d | [
"MIT"
] | 122 | 2016-08-05T02:27:31.000Z | 2022-03-21T07:53:10.000Z | cogs/fun.py | Der-Eddy/discord_bot | bc2511e6d030ee2e099410bd846ea871fe3f109d | [
"MIT"
] | 15 | 2017-12-07T14:28:20.000Z | 2021-11-19T13:03:37.000Z | cogs/fun.py | Der-Eddy/discord_bot | bc2511e6d030ee2e099410bd846ea871fe3f109d | [
"MIT"
] | 100 | 2016-08-21T18:12:29.000Z | 2022-02-19T11:21:23.000Z | import random
import urllib.parse
import sqlite3
import asyncio
import aiohttp
import discord
from discord.ext import commands
import loadconfig
| 55.956349 | 226 | 0.582654 |
e121641fdd16503ebb092e218a41471693799a5f | 3,362 | py | Python | src/service/plugins/ssrs/ssr.py | awesome-archive/ssrs | 29c6e02d08270b3d9ca2174f29d4d32733acfdb6 | [
"Apache-2.0"
] | 32 | 2018-05-09T06:08:34.000Z | 2022-02-18T14:21:23.000Z | src/service/plugins/ssrs/ssr.py | awesome-archive/ssrs | 29c6e02d08270b3d9ca2174f29d4d32733acfdb6 | [
"Apache-2.0"
] | 1 | 2019-08-08T07:24:31.000Z | 2019-08-08T07:24:31.000Z | src/service/plugins/ssrs/ssr.py | awesome-archive/ssrs | 29c6e02d08270b3d9ca2174f29d4d32733acfdb6 | [
"Apache-2.0"
] | 19 | 2018-08-02T08:11:05.000Z | 2021-07-07T02:10:18.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json
import copy
import socket
import subprocess
import six
| 34.659794 | 118 | 0.496133 |
e122eb0c0e3191c6ed28f670de3cb045fb8a32e8 | 1,866 | py | Python | asconnect/models/beta_detail.py | guojiubo/asconnect | 1c725dc2036f0617854f19b9a310a91c42239c72 | [
"MIT"
] | 14 | 2020-09-30T14:45:38.000Z | 2022-03-04T09:49:26.000Z | asconnect/models/beta_detail.py | guojiubo/asconnect | 1c725dc2036f0617854f19b9a310a91c42239c72 | [
"MIT"
] | 8 | 2020-09-30T14:50:18.000Z | 2022-01-25T06:18:20.000Z | asconnect/models/beta_detail.py | guojiubo/asconnect | 1c725dc2036f0617854f19b9a310a91c42239c72 | [
"MIT"
] | 7 | 2020-10-09T18:06:18.000Z | 2022-01-25T05:21:12.000Z | """Build beta detail models for the API"""
import enum
from typing import Dict, Optional
import deserialize
from asconnect.models.common import BaseAttributes, Links, Relationship, Resource
| 32.172414 | 81 | 0.758307 |
e126ebf5b69520889633dea016ffe4b49b9b61da | 922 | py | Python | code2.py | cskurdal/VRSurfing | 6d3dae816a59b5949cac29d60b05ed75616c97f9 | [
"MIT"
] | null | null | null | code2.py | cskurdal/VRSurfing | 6d3dae816a59b5949cac29d60b05ed75616c97f9 | [
"MIT"
] | null | null | null | code2.py | cskurdal/VRSurfing | 6d3dae816a59b5949cac29d60b05ed75616c97f9 | [
"MIT"
] | null | null | null | import math
from plotter import Plotter
from plots import LinePlot
import board
import digitalio
import busio
import adafruit_sdcard
import storage
from adafruit_bitmapsaver import save_pixels
plot()
#save()
print('done')
#import jax.numpy as np
#
#def periodic_spikes(firing_periods, duration: int):
# return 0 == (1 + np.arange(duration))[:, None] % firing_periods
#
#
#periodic_spikes(5, 22)
| 23.641026 | 71 | 0.659436 |
e1284d4bbaf6bf582868bfb66265b4397932b66a | 385 | py | Python | scripts/compile-tests.py | PENGUINLIONG/liella | d0d4bc3e05419705712384b15d1c5db00ee12f73 | [
"Apache-2.0",
"MIT"
] | null | null | null | scripts/compile-tests.py | PENGUINLIONG/liella | d0d4bc3e05419705712384b15d1c5db00ee12f73 | [
"Apache-2.0",
"MIT"
] | null | null | null | scripts/compile-tests.py | PENGUINLIONG/liella | d0d4bc3e05419705712384b15d1c5db00ee12f73 | [
"Apache-2.0",
"MIT"
] | null | null | null | from os import listdir
import subprocess
for f in listdir("tests/vulkan"):
if f.endswith(".spv"):
continue
print(f"-- compiling test {f}")
p = subprocess.run(["glslangValidator", f"tests/vulkan/{f}", "-H", "-o", f"tests/vulkan/{f}.spv"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode != 0:
print(bytes.decode(p.stdout))
| 29.615385 | 162 | 0.644156 |
e1297aff417ed953bdd6f0365aac41a401e15814 | 5,769 | py | Python | collections/nemo_asr/nemo_asr/parts/dataset.py | petermartigny/NeMo | b20821e637314940e36b63d32c601c43d1b74051 | [
"Apache-2.0"
] | 1 | 2020-03-22T11:23:11.000Z | 2020-03-22T11:23:11.000Z | collections/nemo_asr/nemo_asr/parts/dataset.py | petermartigny/NeMo | b20821e637314940e36b63d32c601c43d1b74051 | [
"Apache-2.0"
] | null | null | null | collections/nemo_asr/nemo_asr/parts/dataset.py | petermartigny/NeMo | b20821e637314940e36b63d32c601c43d1b74051 | [
"Apache-2.0"
] | 1 | 2020-08-25T06:43:34.000Z | 2020-08-25T06:43:34.000Z | # Taken straight from Patter https://github.com/ryanleary/patter
# TODO: review, and copyright and fix/add comments
import torch
from torch.utils.data import Dataset
from .manifest import Manifest
def audio_seq_collate_fn(batch):
"""
collate a batch (iterable of (sample tensor, label tensor) tuples) into
properly shaped data tensors
:param batch:
:return: inputs (batch_size, num_features, seq_length), targets,
input_lengths, target_sizes
"""
# sort batch by descending sequence length (for packed sequences later)
batch.sort(key=lambda x: -x[0].size(0))
minibatch_size = len(batch)
# init tensors we need to return
inputs = torch.zeros(minibatch_size, batch[0][0].size(0))
input_lengths = torch.zeros(minibatch_size, dtype=torch.long)
target_sizes = torch.zeros(minibatch_size, dtype=torch.long)
targets = []
metadata = []
# iterate over minibatch to fill in tensors appropriately
for i, sample in enumerate(batch):
input_lengths[i] = sample[0].size(0)
inputs[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])
target_sizes[i] = len(sample[1])
targets.extend(sample[1])
metadata.append(sample[2])
targets = torch.tensor(targets, dtype=torch.long)
return inputs, targets, input_lengths, target_sizes, metadata
| 39.244898 | 78 | 0.600277 |
e129ccfbd3be47531b273fb3289a20523a49c675 | 5,277 | py | Python | HandSComp.py | CRZaug/NonlinearWaves | 2adfc2cc5e0c18576c6b73420a913ef1ce23000d | [
"MIT"
] | null | null | null | HandSComp.py | CRZaug/NonlinearWaves | 2adfc2cc5e0c18576c6b73420a913ef1ce23000d | [
"MIT"
] | null | null | null | HandSComp.py | CRZaug/NonlinearWaves | 2adfc2cc5e0c18576c6b73420a913ef1ce23000d | [
"MIT"
] | null | null | null | """
~~~ IMPORT EXPERIMENTAL DATA, PROCESS, AND NONDIMENSIONALIZE ~~~
This code reads in the rescaled Snodgrass data and compares parameters
to known parameters found in the Henderson and Segur paper.
1. Get distances
2. Read in the gauge data for each event (get frequencies and Fourier magnitudes)
3. Adjust the y axis units
4. Get the k vector using integer division and clean up
5. Get the carrier wave location (requires some restricting)
6. Factor out carrier wave
7. Get the energies at each gauge
8. Get nondimensionalization constants
"""
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
import NLS
import random as rand
from scipy import interpolate
### STEP 1: Get distance information
# Gauge positions along the propagation path (meters).  These become the x
# values of the least-squares decay fit at the end of each event loop, so
# each event directory is assumed to hold exactly len(distv) == 4 gauge files
# — TODO confirm.
distv = np.array([0.0,2400000.0,4200000.0,8700000.0]) # Distances between gauges in METERS
### STEP 2: Read in information at each gauge for each event
subdirs = ['Aug1Data','Aug2Data','JulyData']
# Define something that will list directories that are not hidden
# Read in the data
# NOTE(review): j is never used below — confirm it can be removed.
j = 0
for sd in subdirs:
    # listdirNH is presumably the non-hidden directory lister referenced by
    # the comment above; it is defined outside this excerpt — verify.
    files = listdirNH(sd+'/Rescaled')
    # Initialize some values
    # n counts gauge files processed for this event; the first file (n == 0)
    # defines the carrier mode and the nondimensionalization constants.
    n = 0
    # NOTE(review): pi is never used below — confirm it can be removed.
    pi =0
    fig1,ax1 = plt.subplots(4,1)
    plt.suptitle(sd)
    # Get files
    Deltavals = []  # band-limited "energy" integral M000 for each gauge, in file order
    for f in files:
        # Each file holds two columns: frequency (row 0 after transpose) and
        # spectral magnitude (row 1).
        datavals = np.transpose(np.loadtxt(f).view(float))
        N = len(datavals[1])
        x = datavals[0] # Frequencies
        sly = datavals[1] # Magnitudes
        #ly = np.sqrt(sly*x)*0.01 #MULTIPLY VERSION (get the amplitude in meters)
        # Mean frequency spacing, used below to convert the spectrum to amplitude.
        mns = []
        for w in range(N-1):
            mns.append(np.abs(x[w+1]-x[w]))
        #mns.append(np.mean(mns))
        ### STEP 3: Adjust the y axis units
        ly = np.sqrt(sly*np.mean(mns))*0.01 # INTEGRATED VERSION
        ### STEP 4: Get the k vector using integer division and clean up
        L = 3600*3 # The period
        k = (x*0.001)//(2*np.pi/L) # Convert to mHz, then divide by 2pi/L to get the k vector
        # REMOVE DUPLICATE VALUES
        # ndk: unique mode numbers, kept in first-appearance order.
        ndk = np.array(())
        for fi in range(len(k)):
            num = k[fi]
            if num not in ndk:
                ndk = np.append(ndk,num)
        # lll: for each unique mode, the indices of the raw samples binned into it.
        lll =[]
        for h in ndk:
            l1=np.where(k==h)[0]
            lll.append(l1)
        # ndy: amplitude per unique mode, averaged over its raw samples.
        ndy = np.array(())
        for ar in lll:
            val = np.mean(ly[ar])
            ndy=np.append(ndy,val)
        ### STEP 5: Get the location of the carrier wave (defined by the first gauge)
        if n == 0:
            m = max(ndy)
            i = np.where(ndy == m)
            # If the maximum is attained at several modes, take the middle one.
            if len(i[0]) > 1:
                newi = i[0][len(i[0])//2]
                carriermode = np.array(newi)
                carrierloc = ndk[carriermode]
            else:
                newi = i[0][0]
                carriermode = np.array(newi)
                carrierloc = ndk[carriermode]
        # First, find the carrier mode in ANY file, not just the first one
        loc = np.where(np.logical_and(ndk>carrierloc*0.99, ndk<carrierloc*1.001))
        #loc = np.where(np.logical_and(ndk>carrierloc-1, ndk<carrierloc+1))
        # Be a little more restrictive
        # NOTE(review): both branches below are identical (loc[0][0]); the
        # if/else is redundant — perhaps a different tie-break was intended.
        if len(loc[0])>1:
            loc = loc[0][0]
        else:
            loc = loc[0][0]
        ### STEP 6: Redefine the k vector so that the carrier mode is at 0 (factor it out)
        # NOTE(review): knew and xnew are not used later in this excerpt.
        knew = ndk-ndk[loc]
        xnew = x-x[loc]
        ### STEP 7: Get the "Energy" integrals
        # Cubic interpolation of the raw spectrum onto a fine 1000-point grid
        # so the trapezoid integrals below are better resolved.
        fnc = interpolate.interp1d(x, sly,kind ='cubic')
        longx = np.linspace(x[0],x[-1],1000)
        newy = fnc(longx)
        # A0: overall amplitude from the integral of the full spectrum
        # (0.01 factor matches the cm->m scaling used for ly above).
        A0 = np.sqrt(2*NLS.trapezoid(newy,(x[-1]-x[0])))*0.01
        figg,axx = plt.subplots()
        axx.plot(x,sly,'.',markersize=7)
        axx.plot(longx,newy)
        plt.show()
        # Band-limited "energy" integral over the 41.2-75.6 band of longx.
        # NOTE(review): the mask selects (41.2, 75.6) but the width passed to
        # trapezoid is 74.6-41.2 — 74.6 vs 75.6 looks like a typo; confirm
        # the intended upper band edge.
        M000 = NLS.trapezoid(newy[np.where(np.logical_and(longx>41.2,longx<75.6))],(74.6-41.2))
        Deltavals.append(M000)
        ### STEP 8: Get nondimensionalization constants
        g = 9.81 #(m/s^2)
        if n==0:
            # Carrier angular frequency from the carrier mode number.
            # NOTE(review): the (2*np.pi)**2 factor (rather than 2*pi) should
            # be checked against the mHz scaling used to build k above.
            w0 = (2*np.pi)**2/L*ndk[loc] # Get the value from the integer
            k0 = w0**2/g # The carrier wavenumber
            m = max(ndy)
            epsilon = 2*m*k0 # The nondimensionalization constant epsilon
        # heps combines the per-file A0 with k0 from the first file of this event.
        heps = A0*k0
        print(f,'Special Values')
        print('2A0',A0)
        print('Maximum value',m)
        print('Carrier frequency',w0)
        print('Wavenumber',k0)
        print('MY epsilon',epsilon)
        print('HENDERSON EPSILON', heps)
        print('period',L)
        n = n+1
    # Decay fit across the gauges: log(M/M0) versus distance.
    M0 = Deltavals[0]
    # NOTE(review): Deltavals is a Python list; list/M0 only broadcasts if M0
    # is a NumPy scalar — confirm NLS.trapezoid returns one.
    MX = Deltavals/M0
    energyy = np.log(MX)
    # Get the fit and define a new y vector
    A = np.vstack([distv, np.ones(len(distv))]).T
    m, b = np.linalg.lstsq(A, energyy,rcond=-1)[0] # m is delta
    hdeltab = -m
    hdeltas = hdeltab/(2*heps**2*k0)
    # NOTE(review): xplot/newy are computed but never plotted in this excerpt.
    xplot = np.linspace(distv[0],distv[-1],100)
    newy = m*xplot+b
    print('HENDERSON BIG Delta ',hdeltab, 'b ', b)
    print('HENDERSON LITTLE delta', hdeltas)
    print()
e12ad429759f61a8d7e2d053224398fdfc9dad67 | 19 | py | Python | pkgs/conf-pkg/src/genie/libs/conf/rip/__init__.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/conf-pkg/src/genie/libs/conf/rip/__init__.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/conf-pkg/src/genie/libs/conf/rip/__init__.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | from .rip import *
| 9.5 | 18 | 0.684211 |
e12b12da65a67d3755d54e62e2738980186e27db | 15,542 | py | Python | src/services/explorer/core.py | solomonricky/epic-awesome-gamer | a6ecff90a716bb145931bb4042f9510e68698694 | [
"Apache-2.0"
] | null | null | null | src/services/explorer/core.py | solomonricky/epic-awesome-gamer | a6ecff90a716bb145931bb4042f9510e68698694 | [
"Apache-2.0"
] | null | null | null | src/services/explorer/core.py | solomonricky/epic-awesome-gamer | a6ecff90a716bb145931bb4042f9510e68698694 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Time : 2022/1/17 15:20
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import os.path
import time
from hashlib import sha256
from typing import List, Optional, Union, Dict
import cloudscraper
import yaml
from lxml import etree # skipcq: BAN-B410 - Ignore credible sources
from selenium.common.exceptions import WebDriverException, InvalidCookieDomainException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from services.settings import DIR_EXPLORER, EPIC_EMAIL
from services.settings import logger
from services.utils import ToolBox, ChallengerContext, StandardContext
from .exceptions import DiscoveryTimeoutException, ProtocolOutdatedWarning
| 33.786957 | 140 | 0.559452 |
e12b30211ce2a1a3e4ccda61c62066c6b101ba25 | 7,312 | py | Python | model/utils.py | Tiamat-Tech/VAENAR-TTS | 69b6b5be1ab5168cfd3c6ab902075638e76a3b8d | [
"MIT"
] | 62 | 2021-07-15T10:09:56.000Z | 2022-03-31T02:53:09.000Z | model/utils.py | Tiamat-Tech/VAENAR-TTS | 69b6b5be1ab5168cfd3c6ab902075638e76a3b8d | [
"MIT"
] | 3 | 2021-07-19T14:45:26.000Z | 2022-03-31T02:38:57.000Z | model/utils.py | Tiamat-Tech/VAENAR-TTS | 69b6b5be1ab5168cfd3c6ab902075638e76a3b8d | [
"MIT"
] | 10 | 2021-07-19T03:20:44.000Z | 2022-02-21T07:07:38.000Z | import torch
import torch.nn as nn
from torch.nn import functional as F
| 37.88601 | 112 | 0.583425 |
e12bf8233ff1f13a2dd46e4e371f37801c0e563f | 2,186 | py | Python | fedjax/legacy/core/federated_algorithm.py | alshedivat/fedjax | ff46ba9955f167160353d7be72f6f5e1febee32c | [
"Apache-2.0"
] | null | null | null | fedjax/legacy/core/federated_algorithm.py | alshedivat/fedjax | ff46ba9955f167160353d7be72f6f5e1febee32c | [
"Apache-2.0"
] | null | null | null | fedjax/legacy/core/federated_algorithm.py | alshedivat/fedjax | ff46ba9955f167160353d7be72f6f5e1febee32c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface definitions for federated algorithms."""
import abc
from typing import Generic, List, TypeVar
from fedjax.legacy.core.model import Model
from fedjax.legacy.core.typing import FederatedData
T = TypeVar('T')
| 34.15625 | 80 | 0.747027 |
0100201d7067edc12b14792aa66df0f99a8f5f65 | 2,306 | py | Python | lib/galaxy/webapps/galaxy/services/jobs.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/services/jobs.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | 6 | 2021-11-11T20:57:49.000Z | 2021-12-10T15:30:33.000Z | lib/galaxy/webapps/galaxy/services/jobs.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | from enum import Enum
from typing import (
Any,
Dict,
)
from galaxy import (
exceptions,
model,
)
from galaxy.managers import hdas
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
JobManager,
JobSearch,
view_show_job,
)
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import JobIndexQueryPayload
| 28.469136 | 98 | 0.662186 |
0101f0c173a9caa73adb1fcaf5f05657435355f6 | 1,984 | py | Python | tests/deephub/trainer/test_early_stopping.py | deeplab-ai/deephub | b1d271436fab69cdfad14f19fa2e29c5338f18d6 | [
"Apache-2.0"
] | 8 | 2019-10-17T12:46:13.000Z | 2020-03-12T08:09:40.000Z | tests/deephub/trainer/test_early_stopping.py | deeplab-ai/deephub | b1d271436fab69cdfad14f19fa2e29c5338f18d6 | [
"Apache-2.0"
] | 12 | 2019-10-22T13:11:56.000Z | 2022-02-10T00:23:30.000Z | tests/deephub/trainer/test_early_stopping.py | deeplab-ai/deephub | b1d271436fab69cdfad14f19fa2e29c5338f18d6 | [
"Apache-2.0"
] | 1 | 2019-10-17T13:21:27.000Z | 2019-10-17T13:21:27.000Z | import pytest
import numpy as np
from deephub.models.registry.toy import DebugToyModel
from deephub.models.feeders import MemorySamplesFeeder
from deephub.trainer import Trainer
| 36.072727 | 107 | 0.594758 |
0102028974c26fedb9d3e8e681861c033e610fbc | 2,157 | py | Python | tests/switchconfig/conftest.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | tests/switchconfig/conftest.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | tests/switchconfig/conftest.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
import pytest
from . import CapturedOutput
from utsc.switchconfig import config
from prompt_toolkit.application import create_app_session
from prompt_toolkit.input import create_pipe_input
if TYPE_CHECKING:
from .. import MockedUtil
from pytest_mock import MockerFixture
# endregion
# region failed interactive fixture experiment
# def pytest_addoption(parser):
# parser.addoption(
# "--interactive", action="store_true", default=False, help="run interactive tests"
# )
# @pytest.fixture()
# def interactive(request, capfd: 'CaptureFixture'):
# if request.config.getoption("--interactive") or os.getenv("VSCODE_DEBUGGER"):
# # here we reach directly into capsys._capture,
# # because the capsys.disabled context manager
# # does not suspend capturing of stdin.
# capmanager: 'CaptureManager' = capfd.request.config.pluginmanager.getplugin("capturemanager")
# capmanager.suspend(in_=True)
# assert capfd._capture # noqa
# capfd._capture.suspend_capturing(in_=True) # noqa
# yield
# capmanager.resume()
# capfd._capture.resume_capturing() # noqa
# else:
# pytest.skip("This test can only be run with the --interactive option")
# def pytest_collection_modifyitems(config, items):
# if config.getoption("--interactive"):
# # --interactive given in cli: do not skip interactive tests
# return
# skip_interactive = pytest.mark.skip(reason="need --interactive option to run")
# for item in items:
# if "interactive" in item.keywords and not os.getenv("VSCODE_DEBUGGER"):
# item.add_marker(skip_interactive)
# endregion
| 31.26087 | 103 | 0.696801 |
0104208de3be81be65db916a9965b3d5c0b060ef | 10,742 | py | Python | hf/protocol/frame.py | HashFast/hashfast-tools | 9617691ac997f12085b688c3ecc6746e8510976d | [
"BSD-3-Clause"
] | 1 | 2020-12-15T02:49:36.000Z | 2020-12-15T02:49:36.000Z | hf/protocol/frame.py | HashFast/hashfast-tools | 9617691ac997f12085b688c3ecc6746e8510976d | [
"BSD-3-Clause"
] | null | null | null | hf/protocol/frame.py | HashFast/hashfast-tools | 9617691ac997f12085b688c3ecc6746e8510976d | [
"BSD-3-Clause"
] | 3 | 2015-09-02T00:31:06.000Z | 2020-12-15T02:52:06.000Z | # Copyright (c) 2014, HashFast Technologies LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of HashFast Technologies LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod
from ..load import crc
from ..util import with_metaclass, int_to_lebytes, lebytes_to_int
# Operation codes from hf_protocol.h.
# Operation codes from hf_protocol.h.
# Forward table: operation name -> opcode number (the second header byte of
# a frame).  Grouped as serial-link ops, factory ops, and USB-specific ops.
opcodes = {
    # Serial protocol operation codes (second header byte)
    'OP_NULL': 0,
    'OP_ROOT': 1,
    'OP_RESET': 2,
    'OP_PLL_CONFIG': 3,
    'OP_ADDRESS': 4,
    'OP_READDRESS': 5,
    'OP_HIGHEST': 6,
    'OP_BAUD': 7,
    'OP_UNROOT': 8,
    'OP_HASH': 9,
    'OP_NONCE': 10,
    'OP_ABORT': 11,
    'OP_STATUS': 12,
    'OP_GPIO': 13,
    'OP_CONFIG': 14,
    'OP_STATISTICS': 15,
    'OP_GROUP': 16,
    'OP_CLOCKGATE': 17,

    # Factory codes
    'OP_SERIAL': 50,        # serial number read/write
    'OP_LIMITS': 51,        # operational limits read/write
    'OP_HISTORY': 52,       # read operational history data
    'OP_CHARACTERIZE': 53,  # characterize one or more die
    'OP_CHAR_RESULT': 54,   # characterization result
    'OP_SETTINGS': 55,      # read or write settings
    'OP_FAN_SETTINGS': 56,
    'OP_POWER': 57,
    'OP_BAD_CORE': 58,      # set or clear bad core status

    # USB interface specific operation codes
    'OP_USB_INIT': 128,     # initialize USB interface details
    'OP_GET_TRACE': 129,    # send back the trace buffer if present
    'OP_LOOPBACK_USB': 130,
    'OP_LOOPBACK_UART': 131,
    'OP_DFU': 132,          # jump into the boot loader
    'OP_USB_SHUTDOWN': 133, # initialize USB interface details
    'OP_DIE_STATUS': 134,   # die status; there are 4 die per ASIC
    'OP_GWQ_STATUS': 135,   # Global Work Queue protocol status
    'OP_WORK_RESTART': 136, # Stratum work restart regime
    'OP_USB_STATS1': 137,   # statistics class 1
    'OP_USB_GWQSTATS': 138, # GWQ protocol statistics
    'OP_USB_NOTICE': 139,   # asynchronous notification event
    'OP_PING': 140,         # echo
    'OP_CORE_MAP': 141,     # return core map
    'OP_VERSION': 142,      # version information
    'OP_FAN': 143,          # set fan speed
    'OP_NAME': 144,         # system name write/read
    'OP_USB_DEBUG': 255
}

# Inverse table: opcode number -> operation name.  A duplicate opcode number
# would silently drop an entry in the comprehension, so the size check below
# asserts that every number was unique.
opnames = {number: name for name, number in opcodes.items()}
assert len(opnames) == len(opcodes)

# Convenience sets for fast membership tests.
known_opcodes = set(opcodes)
known_opnames = set(opnames)
# Fix: Restore when using serial line directly
# crc32 = framebytes[-4:]
# if crc32 != crc.crc32_to_bytelist(crc.crc32(data)):
# raise HF_Error("Bad CRC32 checksum.")
# Fix: Document terminology: frame is the whole thing and consists of up to
# three parts: the header, the data, and the CRC32 checksum.
# Fix: Wants to verify checksums and throw exception if they are not right.
# And check for 0xaa.
# Fix: Wants to make all the fields of the header accessible, but also provide raw bytes.
# Fix: Should be able to initialize with stream of bytes or by filling in fields
# and asking for the bytes. Throw exception if field values are out of bounds.
# Fix: Maybe want something which checks for known opcode and whether fields are
# plausible for that opcode -- problem is that if we are using this to report
# what was seen on the wire, we need to make those assumptions, maybe.
# Fix: The really pure way to do this is to create a subclass for every opcode type
# and then have specific methods for that type. Probably more trouble than
# its worth, but it would also let us have specific methods for parameters
# that just occupy a couple bits. | 41.474903 | 106 | 0.67129 |
01044352dba301fc4c0e8b880755aef7cda79a1f | 561 | py | Python | page/models.py | Dynamicist-handa/EscuelaLingua | 198abfcc14204d8ecd2706f2de2650293219662e | [
"Apache-2.0"
] | null | null | null | page/models.py | Dynamicist-handa/EscuelaLingua | 198abfcc14204d8ecd2706f2de2650293219662e | [
"Apache-2.0"
] | null | null | null | page/models.py | Dynamicist-handa/EscuelaLingua | 198abfcc14204d8ecd2706f2de2650293219662e | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.conf import settings
from courses.models import Course
# Create your models here.
| 26.714286 | 69 | 0.741533 |
010633c8fe4a1f8f50f2cbc160f034fdd91b60e5 | 11,162 | py | Python | src/command.py | 2minchul/chip-helper | 437d33938a19bab7e7380ff9dd0e7e98ec26fdb7 | [
"Apache-2.0"
] | 2 | 2020-05-12T06:11:39.000Z | 2020-07-17T10:45:20.000Z | src/command.py | 2minchul/chip-helper | 437d33938a19bab7e7380ff9dd0e7e98ec26fdb7 | [
"Apache-2.0"
] | 3 | 2021-06-08T21:30:59.000Z | 2022-03-12T00:28:26.000Z | src/command.py | 2minchul/chip-helper | 437d33938a19bab7e7380ff9dd0e7e98ec26fdb7 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import re
import sys
from operator import itemgetter
from typing import Optional
import sentry_sdk
import youtube_dl
from selenium.common.exceptions import SessionNotCreatedException
from cmd_tool import (
get_execution_path,
exit_enter,
get_input_path_or_exit,
get_chrome_driver_path_or_exit,
get_resource_path,
cd
)
from imagetools import Size
from qrcode import NaverQrCode, make_qr_image, make_redirect_html
from thumbnail import composite_thumbnail, capture_video
from youtube_uploader import YoutubeUploader, YoutubeUploaderException
# Report crashes to Sentry (a DSN is a public, client-side identifier).
sentry_sdk.init("https://1ff694f9169a4fa383a867fe10ed9329@o342398.ingest.sentry.io/5243685")
if __name__ == '__main__':
    # One sub-command per tool.  required=True guarantees args.command is one
    # of the registered names, so the dict lookup below can never return None.
    parser = argparse.ArgumentParser(description='Chip Helper')
    subparsers = parser.add_subparsers(help='commands', dest='command', required=True)
    subparsers.add_parser('makedirs', help='Create dirs like "nnnn" format in a specific path')
    subparsers.add_parser('organize', help='Create numeric dirs and move video files in it')
    subparsers.add_parser('thumbnail', help='Create thumbnails')
    subparsers.add_parser('upload', help='Upload videos to youtube')
    subparsers.add_parser('youtube-url', help='Make youtube_url.txt in input dirs')
    subparsers.add_parser('qrcode', help='Generate Naver QR and composite qr image')
    args = parser.parse_args()
    # Dispatch table: sub-command name -> handler function.  The handlers are
    # defined earlier in this file (outside this excerpt).
    func = {
        'makedirs': make_dirs,
        'thumbnail': make_thumbnail,
        'upload': upload_videos,
        'youtube-url': update_youtube_urls,
        'qrcode': qrcode,
        'organize': organize,
    }.get(args.command)
    func()
    print(' .')
    # exit_enter (from cmd_tool) presumably waits for Enter before the console
    # window closes — confirm.
    exit_enter()
0108aa0614cb046c0695b8425a9b7d179e4c447f | 1,338 | py | Python | code/get.py | tionn/holo-at-on | 8bda6e73d94184fa6fde3c1d26640e96341ae2a2 | [
"CC0-1.0"
] | null | null | null | code/get.py | tionn/holo-at-on | 8bda6e73d94184fa6fde3c1d26640e96341ae2a2 | [
"CC0-1.0"
] | null | null | null | code/get.py | tionn/holo-at-on | 8bda6e73d94184fa6fde3c1d26640e96341ae2a2 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import io
import urllib2
import string
from BeautifulSoup import BeautifulSoup
import pandas as pd
import sys
# Entry page listing townships for county=1 on the MOE Taiwanese-Hokkien
# dictionary site; per-area pages are discovered from it via get_area_url().
city_url = 'http://twblg.dict.edu.tw/holodict_new/index/xiangzhen_level1.jsp?county=1'
# NOTE: Python 2 script (print statements; urllib2 imported above).
# extract_items / get_area_url are defined earlier in this file (outside this excerpt).
if __name__=='__main__':
    #
    data = extract_items(city_url)
    data.pop() # ignore data from ''
    print 'Cities and countries are done.'
    #
    area_url = get_area_url()
    # Append the scraped rows from every area page onto the city/county rows.
    for i in area_url:
        area_data = extract_items(i)
        data.extend(area_data)
    print 'Townships are done.'
    #df = pd.DataFrame(data, columns=['name', 'holo'])
    # Write the combined mapping without header or index, UTF-8 encoded.
    df = pd.DataFrame(data)
    df.to_csv('moe_mapping.csv', encoding='utf-8', index=False, header=0)
    print 'csv file done.'
01092c860365112e2ab6bab4644a012763fb75a9 | 3,729 | py | Python | Soccer_league_project1.py | denisela1/Soccer_League_P1 | 5bc6de71259643ed2a6d9791ddbc70773f1c259d | [
"BSD-3-Clause-Clear"
] | 1 | 2018-02-26T08:47:15.000Z | 2018-02-26T08:47:15.000Z | Soccer_league_project1.py | denisela1/Soccer_League_P1 | 5bc6de71259643ed2a6d9791ddbc70773f1c259d | [
"BSD-3-Clause-Clear"
] | null | null | null | Soccer_league_project1.py | denisela1/Soccer_League_P1 | 5bc6de71259643ed2a6d9791ddbc70773f1c259d | [
"BSD-3-Clause-Clear"
] | null | null | null | import csv
#global variables for teams:
sharks = []
dragons = []
raptors = []
#read the csv file with the player info and create a player dictionary:
#distribute kids based on experience:
#finalize teams:
#update the player dictionary to include the assigned teams:
#write the league info into the text file:
#generate letters to send the guardians:
if __name__ == "__main__":
read_players()
experienced_players()
inexperienced_players()
make_teams()
create_textfile()
final_league()
letter_generator()
| 33.594595 | 97 | 0.61196 |
010b4ad2a97b357a77ffe35ad3089e6223aec664 | 2,312 | py | Python | Gobot-Mecanum/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
] | null | null | null | Gobot-Mecanum/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
] | 2 | 2019-06-17T23:38:23.000Z | 2019-06-17T23:39:43.000Z | Gobot-Mecanum/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
] | null | null | null | import wpilib
import wpilib.drive
import ctre
import robotmap
from wpilib.interfaces import GenericHID
# Shorthand for the wpilib controller-hand enum values used by joystick
# axis/button reads (e.g. getX(RIGHT_HAND)).
RIGHT_HAND = GenericHID.Hand.kRight
LEFT_HAND = GenericHID.Hand.kLeft
# Hand control to the robot framework.  The Robot class is defined earlier
# in this file (outside this excerpt); physics_enabled=True presumably
# activates the simulation physics model when run off-robot — confirm.
if __name__ == "__main__":
    wpilib.run(Robot,physics_enabled=True)
01122030ff57d9377ddf61352858ba09c5197d30 | 139 | py | Python | blog/urls.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | blog/urls.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | blog/urls.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from .views import blog
# Single route: the app root renders the blog view (named 'blog' so templates
# can reverse it with {% url 'blog' %}).
urlpatterns = [
    path('', blog, name='blog'),
]
| 15.444444 | 32 | 0.705036 |
0112992950dc4c577579c050f7017281022ccc42 | 139 | py | Python | iris_sdk/models/maps/local_rate_center_list.py | NumberAI/python-bandwidth-iris | 0e05f79d68b244812afb97e00fd65b3f46d00aa3 | [
"MIT"
] | 2 | 2020-04-13T13:47:59.000Z | 2022-02-23T20:32:41.000Z | iris_sdk/models/maps/local_rate_center_list.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2020-09-18T20:59:24.000Z | 2021-08-25T16:51:42.000Z | iris_sdk/models/maps/local_rate_center_list.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2018-12-12T14:39:50.000Z | 2020-11-17T21:42:29.000Z | #!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap | 19.857143 | 49 | 0.791367 |
01157eaf40b4347f7763196480bf6b81341c469b | 5,374 | py | Python | webapp/services/hexun_service.py | myfreshcity/mystock | 3a8832e8c498128683b6af528da92d7fda32386d | [
"MIT"
] | 2 | 2016-09-19T09:18:17.000Z | 2022-02-16T14:55:51.000Z | webapp/services/hexun_service.py | myfreshcity/mystock | 3a8832e8c498128683b6af528da92d7fda32386d | [
"MIT"
] | 2 | 2020-04-29T13:01:45.000Z | 2020-04-29T13:01:45.000Z | webapp/services/hexun_service.py | myfreshcity/mystock | 3a8832e8c498128683b6af528da92d7fda32386d | [
"MIT"
] | 2 | 2018-06-29T15:09:36.000Z | 2019-09-05T09:26:06.000Z | import re
import traceback
import urllib2
import pandas as pd
import json,random,time,datetime
from bs4 import BeautifulSoup
from pandas.tseries.offsets import YearEnd
from sqlalchemy import text
from webapp import db, app
from webapp.models import FinanceBasic
# Desktop-browser User-Agent sent with every request so hexun.com serves the
# normal pages instead of rejecting the default urllib2 agent.
headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
#ttm
# NOTE(review): the comments above/below lost their non-ASCII text during
# extraction; original intent unclear ("ttm" possibly refers to
# trailing-twelve-months metrics — confirm against the repository).
#,,
| 36.067114 | 128 | 0.595646 |
0118814a3663bee91c59984af98f47d72c8f9e4c | 2,555 | py | Python | machine-learning-and-ai/handwriting-classifier/neural_network_handwriting_classifier.py | fraserlove/python | b449259c02e73102e37a4cd42018dbcc6b04d0ba | [
"Apache-2.0"
] | 16 | 2020-06-11T16:54:55.000Z | 2022-01-07T01:36:05.000Z | machine-learning-and-ai/handwriting-classifier/neural_network_handwriting_classifier.py | fraserlove/python-projects | b449259c02e73102e37a4cd42018dbcc6b04d0ba | [
"Apache-2.0"
] | null | null | null | machine-learning-and-ai/handwriting-classifier/neural_network_handwriting_classifier.py | fraserlove/python-projects | b449259c02e73102e37a4cd42018dbcc6b04d0ba | [
"Apache-2.0"
] | 15 | 2020-06-14T08:29:50.000Z | 2021-08-05T17:25:42.000Z | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST with one-hot labels (TF1-era input pipeline; downloads the data
# into ./MNIST_data on first run).
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
# Network hyperparameters
learning_rate = 0.0001 # 1.95 for sigmoid activation function
batch_size = 10
# NOTE(review): update_step is never used below — confirm it can be removed.
update_step = 10
input_nodes = 784 # 28x28 images flattened as input
layer_1_nodes = 500
layer_2_nodes = 500
layer_3_nodes = 500
output_nodes = 10
# Placeholders for a batch of flattened images and their one-hot labels.
network_input = tf.placeholder(tf.float32, [None, input_nodes])
target_output = tf.placeholder(tf.float32, [None, output_nodes])
# Network model, weights and biases
layer_1 = tf.Variable(tf.random_normal([input_nodes, layer_1_nodes]))
layer_2 = tf.Variable(tf.random_normal([layer_1_nodes, layer_2_nodes]))
layer_3 = tf.Variable(tf.random_normal([layer_2_nodes, layer_3_nodes]))
output_layer = tf.Variable(tf.random_normal([layer_3_nodes, output_nodes]))
layer_1_bias = tf.Variable(tf.random_normal([layer_1_nodes]))
layer_2_bias = tf.Variable(tf.random_normal([layer_2_nodes]))
layer_3_bias = tf.Variable(tf.random_normal([layer_3_nodes]))
output_layer_bias = tf.Variable(tf.random_normal([output_nodes]))
# Feedforward calculations
layer_1_out = tf.nn.relu(tf.matmul(network_input, layer_1) + layer_1_bias)
layer_2_out = tf.nn.relu(tf.matmul(layer_1_out, layer_2) + layer_2_bias)
layer_3_out = tf.nn.relu(tf.matmul(layer_2_out, layer_3) + layer_3_bias)
# network_out_1 holds raw logits; softmax_cross_entropy_with_logits applies
# the softmax internally, so the loss takes the logits while network_out_2
# (the explicit softmax) is only used for predictions below.
network_out_1 = tf.matmul(layer_3_out, output_layer) + output_layer_bias
network_out_2 = tf.nn.softmax(network_out_1)
cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = network_out_1, labels = target_output))
training_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
# Per-example correctness, then batch accuracy as the mean of the 0/1 values.
# NOTE(review): 'predicitions' is a typo for 'predictions' (the name is local
# to this script, so a rename would be safe).
predicitions = tf.equal(tf.argmax(network_out_2, 1), tf.argmax(target_output, 1))
accuracy = tf.reduce_mean(tf.cast(predicitions, tf.float32))
# Running the neural network
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    no_epochs = 10
    for epoch in range(no_epochs):
        total_cost = 0
        no_batches = int(mnist.train.num_examples / batch_size)
        for batch in range(no_batches):
            # NOTE(review): this rebinding shadows the imported input_data
            # module; harmless here since the module is not used again, but
            # worth renaming for clarity.
            input_data, labels = mnist.train.next_batch(batch_size)
            step, cost = session.run([training_step, cost_function], feed_dict = {network_input: input_data, target_output: labels})
            total_cost += cost
        # NOTE(review): epoch is 0-based, so this reports "Epoch 0 out of 10" first.
        print('Epoch {} out of {} completed, loss: {}'.format(epoch, no_epochs, total_cost))
    # Final evaluation on the held-out test split.
    print('Accuracy: {}'.format(accuracy.eval({network_input: mnist.test.images, target_output: mnist.test.labels})))