Column schema for the rows below (min/max are numeric ranges, string lengths, or list lengths; ⌀ marks columns that can be null):

| column | dtype | min | max | nulls |
|---|---|---|---|---|
| hexsha | string | 40 | 40 | |
| size | int64 | 5 | 2.06M | |
| ext | string (11 classes) | | | |
| lang | string (1 class) | | | |
| max_stars_repo_path | string | 3 | 251 | |
| max_stars_repo_name | string | 4 | 130 | |
| max_stars_repo_head_hexsha | string | 40 | 78 | |
| max_stars_repo_licenses | list | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | 24 | 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_path | string | 3 | 251 | |
| max_issues_repo_name | string | 4 | 130 | |
| max_issues_repo_head_hexsha | string | 40 | 78 | |
| max_issues_repo_licenses | list | 1 | 10 | |
| max_issues_count | int64 | 1 | 116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_path | string | 3 | 251 | |
| max_forks_repo_name | string | 4 | 130 | |
| max_forks_repo_head_hexsha | string | 40 | 78 | |
| max_forks_repo_licenses | list | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | 24 | 24 | ⌀ |
| content | string | 1 | 1.05M | |
| avg_line_length | float64 | 1 | 1.02M | |
| max_line_length | int64 | 3 | 1.04M | |
| alphanum_fraction | float64 | 0 | 1 | |

Each row below is rendered as a compact record: a hexsha/size/ext/lang line, one line each for the max_stars/max_issues/max_forks groups (path | repo @ head | licenses | count | event dates), then the file content and its line statistics.

hexsha: 4125c4ef4416a704e2a8626b154e255f03a002cf | size: 433 | ext: py | lang: Python
max_stars: rampwf/utils/tests/test_sanitize.py | DimitriPapadopoulos/ramp-workflow @ c235e80b81fc8d8a5e0c175df50a55cc58dd78aa | ["BSD-3-Clause"] | count: 66 | 2017-08-31T08:48:45.000Z to 2022-03-21T16:05:31.000Z
max_issues: rampwf/utils/tests/test_sanitize.py | DimitriPapadopoulos/ramp-workflow @ c235e80b81fc8d8a5e0c175df50a55cc58dd78aa | ["BSD-3-Clause"] | count: 265 | 2017-06-02T19:22:38.000Z to 2022-03-31T13:08:00.000Z
max_forks: rampwf/utils/tests/test_sanitize.py | DimitriPapadopoulos/ramp-workflow @ c235e80b81fc8d8a5e0c175df50a55cc58dd78aa | ["BSD-3-Clause"] | count: 44 | 2017-06-03T15:35:58.000Z to 2022-03-31T12:46:42.000Z
content:
import pytest
from rampwf.utils.sanitize import _sanitize_input
avg_line_length: 27.0625 | max_line_length: 60 | alphanum_fraction: 0.709007

hexsha: 4127cc56a9b643adacb0505a74b957d4c74ed758 | size: 118 | ext: py | lang: Python
max_stars: issues/apps.py | 6aika/o3-6a-kkhprp @ de0373733a0f4a936a86f6a19b28ca2e577beb71 | ["MIT"] | count: 6 | 2016-07-08T08:50:51.000Z to 2018-06-06T09:58:43.000Z
max_issues: issues/apps.py | 6aika/issue-reporting @ de0373733a0f4a936a86f6a19b28ca2e577beb71 | ["MIT"] | count: 50 | 2016-04-19T12:22:08.000Z to 2021-09-22T17:39:33.000Z
max_forks: issues/apps.py | 6aika/o3-6a-kkhprp @ de0373733a0f4a936a86f6a19b28ca2e577beb71 | ["MIT"] | count: 5 | 2016-07-08T08:50:56.000Z to 2019-07-06T11:34:42.000Z
content:
from django.apps import AppConfig
avg_line_length: 16.857143 | max_line_length: 33 | alphanum_fraction: 0.728814

hexsha: 412aadfb4c71d1e45e7e11134561c9b5c2fc6eda | size: 2,018 | ext: py | lang: Python
max_stars: FileNamePurifier/FileNamePurifier.py | dbpiper/FileNamePurifier @ 620088ea3be1b8874609fa769cfb8e6b636d5e8b | ["MIT"] | count: null | dates: null
max_issues: FileNamePurifier/FileNamePurifier.py | dbpiper/FileNamePurifier @ 620088ea3be1b8874609fa769cfb8e6b636d5e8b | ["MIT"] | count: null | dates: null
max_forks: FileNamePurifier/FileNamePurifier.py | dbpiper/FileNamePurifier @ 620088ea3be1b8874609fa769cfb8e6b636d5e8b | ["MIT"] | count: null | dates: null
content:
from Parser import Parser
from LexicalAnalyzer import LexicalAnalyzer
avg_line_length: 46.930233 | max_line_length: 133 | alphanum_fraction: 0.693756

hexsha: 412b47d093592288c113a1eac3194f68134c0446 | size: 11,406 | ext: py | lang: Python
max_stars: data/transforms.py | raja21068/Federated-Learning-For-Medical-Images @ aa30ce9d8106fd4039188fc56fa99bdc9f46f0e0 | ["MIT"] | count: 27 | 2021-03-05T05:56:35.000Z to 2022-03-30T03:15:43.000Z
max_issues: data/transforms.py | DiahannWu/FL-MRCM @ 946c981a044452333791b7da26609c0874da292c | ["MIT"] | count: 8 | 2021-03-08T10:41:19.000Z to 2021-12-30T04:53:21.000Z
max_forks: data/transforms.py | DiahannWu/FL-MRCM @ 946c981a044452333791b7da26609c0874da292c | ["MIT"] | count: 5 | 2021-03-28T14:02:30.000Z to 2022-01-11T08:31:42.000Z
content:
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def to_numpy(data):
"""
    Convert PyTorch tensor to numpy array. For a complex tensor with two channels (last dimension of size 2), a complex numpy array is returned.
Args:
data (torch.Tensor): Input torch tensor
Returns:
np.array numpy arrays
"""
if data.shape[-1] == 2:
out = np.zeros(data.shape[:-1], dtype=np.complex64)
real = data[..., 0].numpy()
imag = data[..., 1].numpy()
out.real = real
out.imag = imag
else:
out = data.numpy()
return out
def apply_mask(data, mask_func, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
return data * mask, mask
def fft2(data, normalized=True):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def rfft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
data = ifftshift(data, dim=(-2, -1))
data = torch.rfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data, normalized=True):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def irfft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
data = ifftshift(data, dim=(-3, -2))
data = torch.irfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-2, -1))
return data
def complex_to_mag_phase(data):
"""
:param data (torch.Tensor): A complex valued tensor, where the size of the third last dimension should be 2
:return: Mag and Phase (torch.Tensor): tensor of same size as input
"""
assert data.size(-3) == 2
mag = (data ** 2).sum(dim=-3).sqrt()
phase = torch.atan2(data[:, 1, :, :], data[:, 0, :, :])
return torch.stack((mag, phase), dim=-3)
def mag_phase_to_complex(data):
"""
:param data (torch.Tensor): Mag and Phase (torch.Tensor):
:return: A complex valued tensor, where the size of the third last dimension is 2
"""
assert data.size(-3) == 2
real = data[:, 0, :, :] * torch.cos(data[:, 1, :, :])
imag = data[:, 0, :, :] * torch.sin(data[:, 1, :, :])
return torch.stack((real, imag), dim=-3)
def partial_fourier(data):
"""
:param data:
:return:
"""
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2 or data.size(-3) == 2
return (data ** 2).sum(dim=-1).sqrt() if data.size(-1) == 2 else (data ** 2).sum(dim=-3).sqrt()
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
def normalize_volume(data, mean, std, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are provided and computed from volume.
Args:
data (torch.Tensor): Input data to be normalized
mean: mean of whole volume
std: std of whole volume
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return normalize(data, mean, std, eps), mean, std
def normalize_complex(data, eps=0.):
"""
Normalize the given complex tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from magnitude of data.
    Note that the data is centered by a complex mean so that the centered result has zero average magnitude.
Args:
data (torch.Tensor): Input data to be normalized (*, 2)
mean: mean of image magnitude
std: std of image magnitude
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized complex tensor with 2 channels (*, 2)
"""
mag = complex_abs(data)
mag_mean = mag.mean()
mag_std = mag.std()
temp = mag_mean/mag
mean_real = data[..., 0] * temp
mean_imag = data[..., 1] * temp
mean_complex = torch.stack((mean_real, mean_imag), dim=-1)
stddev = mag_std
return (data - mean_complex) / (stddev + eps), mag_mean, stddev
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
avg_line_length: 29.703125 | max_line_length: 115 | alphanum_fraction: 0.608276
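
A minimal sketch of how the shape helpers above compose, assuming it runs in the same module as the transforms.py snippet (toy data, invented shapes). Note that fft2/ifft2/rfft2/irfft2 call the legacy torch.fft/torch.ifft/torch.rfft/torch.irfft functions, which were removed in PyTorch 1.8 in favor of the torch.fft module; the sketch sticks to the version-independent helpers:

```python
import numpy as np
import torch

# Toy complex "k-space": an 8x8 complex128 array.
k = np.random.randn(8, 8) + 1j * np.random.randn(8, 8)

t = to_tensor(k)                       # (8, 8, 2): real/imag stacked last
mag = complex_abs(t)                   # (8, 8) magnitude image
patch = center_crop(mag, (4, 4))       # central 4x4 crop
normed, mean, std = normalize_instance(patch)  # zero-mean, unit-std patch

back = to_numpy(t)                     # complex64 round trip
assert np.allclose(back, k.astype(np.complex64))
```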

hexsha: 412b523c6ab5df841fbace6c8ecfb1e65fe6d301 | size: 171 | ext: py | lang: Python
max_stars: tests/test_entries_to_mt.py | Hagihara-A/migrate-exblog @ f5df20e07e74bc1bb14888c143bc43b2d775f666 | ["MIT"] | count: null | dates: null
max_issues: tests/test_entries_to_mt.py | Hagihara-A/migrate-exblog @ f5df20e07e74bc1bb14888c143bc43b2d775f666 | ["MIT"] | count: 1 | 2019-01-07T14:34:14.000Z to 2019-01-07T14:34:14.000Z
max_forks: tests/test_entries_to_mt.py | Hagihara-A/scrape-excite-blog @ f5df20e07e74bc1bb14888c143bc43b2d775f666 | ["MIT"] | count: null | dates: null
content:
import doctest
from migrate_exblog import entries_to_mt
avg_line_length: 19 | max_line_length: 55 | alphanum_fraction: 0.795322

hexsha: 412d18a2cbe30949e9cef10400c6fc6b33fdbee8 | size: 97 | ext: py | lang: Python
max_stars: deeppavlov/utils/server/__init__.py | xbodx/DeepPavlov @ 4b60bf162df4294b8b0db3b72786cdd699c674fa | ["Apache-2.0"] | count: 5,893 | 2018-02-01T18:13:20.000Z to 2022-03-31T19:22:21.000Z
max_issues: deeppavlov/utils/server/__init__.py | xbodx/DeepPavlov @ 4b60bf162df4294b8b0db3b72786cdd699c674fa | ["Apache-2.0"] | count: 749 | 2018-01-31T11:36:02.000Z to 2022-03-30T07:24:22.000Z
max_forks: deeppavlov/utils/server/__init__.py | xbodx/DeepPavlov @ 4b60bf162df4294b8b0db3b72786cdd699c674fa | ["Apache-2.0"] | count: 1,155 | 2018-02-01T10:52:15.000Z to 2022-03-29T02:12:15.000Z
content:
from .server import get_server_params, get_ssl_params, redirect_root_to_docs, start_model_server
avg_line_length: 48.5 | max_line_length: 96 | alphanum_fraction: 0.886598

hexsha: f5aba0aa3a1bda30d3d5e14338fb55d72ab3b386 | size: 1,883 | ext: py | lang: Python
max_stars: b5/lib/state.py | team23/b5 @ 90f45e86966eeb7a259667bbe06a5555648d012d | ["BSD-3-Clause"] | count: 14 | 2018-11-24T23:33:35.000Z to 2022-02-04T23:46:49.000Z
max_issues: b5/lib/state.py | team23/b5 @ 90f45e86966eeb7a259667bbe06a5555648d012d | ["BSD-3-Clause"] | count: 3 | 2020-02-10T11:05:11.000Z to 2020-03-04T08:42:11.000Z
max_forks: b5/lib/state.py | team23/b5 @ 90f45e86966eeb7a259667bbe06a5555648d012d | ["BSD-3-Clause"] | count: 1 | 2020-02-11T19:45:13.000Z to 2020-02-11T19:45:13.000Z
content:
import os
import tempfile
from types import TracebackType
from typing import Any, BinaryIO, Optional, TextIO, Type, Union
import yaml
avg_line_length: 28.969231 | max_line_length: 115 | alphanum_fraction: 0.60701

hexsha: f5ac35c88920717e7f434d347b3a61d75f1b9fd5 | size: 2,711 | ext: py | lang: Python
max_stars: lines_ext.py | subhrajit02/handwritten-digit-recognision @ 239a4bd1283393865d2655b91ad4674ce8450882 | ["MIT"] | count: null | dates: null
max_issues: lines_ext.py | subhrajit02/handwritten-digit-recognision @ 239a4bd1283393865d2655b91ad4674ce8450882 | ["MIT"] | count: null | dates: null
max_forks: lines_ext.py | subhrajit02/handwritten-digit-recognision @ 239a4bd1283393865d2655b91ad4674ce8450882 | ["MIT"] | count: null | dates: null
content:
import numpy as np
import cv2
def rem_multi_lines(lines, thresh):
"""
    Remove groups of detected lines that lie in close proximity, keeping one averaged line per group.
    :param lines: initial list of all detected lines (possibly several per true line)
    :param thresh: maximum distance between two lines for them to be considered the same line
    :return: final list with a single averaged line in place of each group
"""
a = []
i = 0
lines.append([800, 0]) # random val/ noise
out = []
# this loop collects lines with close proximity in a list (a) and then appends that
# complete list in a common list called out.
while i < len(lines) - 1:
if lines[i] not in a:
a.append(lines[i])
if abs(lines[i + 1][0] - lines[i][0]) < thresh:
a.append(lines[i + 1])
else:
out.append(a)
a = []
i += 1
# print(out)
final = []
for i in out:
a = np.array(i)
final.append(np.average(a, axis=0))
# print(final)
for i in final.copy():
if i[0] < 0:
final.remove(i)
return final
def draw_r_theta_lines(img, lines, color):
"""
draw lines on image which are of (r, theta) form
:param img: image to draw the lines on
:param lines: list of lines on the form (r, theta)
:param color: color of lines
:return:
"""
for rho, theta in lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img, (x1, y1), (x2, y2), color, 2)
avg_line_length: 25.101852 | max_line_length: 88 | alphanum_fraction: 0.560679
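
A hedged usage sketch of rem_multi_lines from the file above; the input values are invented (rho, theta) pairs in the format cv2.HoughLines returns, assumed sorted by rho. Note the function appends a [800, 0] sentinel to the list it is given, so it mutates its input:

```python
# Two near-duplicate lines (rho 100 vs 102) plus one distinct line.
detected = [[100, 0.00], [102, 0.02], [250, 1.57]]
merged = rem_multi_lines(detected, thresh=10)
# Each proximity group collapses to its element-wise average:
# [array([101.  , 0.01]), array([250.  , 1.57])]
print(merged)
```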

hexsha: f5acb14365decf5cb2d85dfdb8cc3ac0e9ffe41f | size: 1,553 | ext: py | lang: Python
max_stars: examples/wmt/tools/scorer/nlm.py | godweiyang/ParaGen @ 9665d1244ea38a41fc06b4e0a7f6411985e2221f | ["Apache-2.0"] | count: 50 | 2022-01-18T07:25:46.000Z to 2022-03-14T13:06:18.000Z
max_issues: examples/wmt/tools/scorer/nlm.py | JiangtaoFeng/ParaGen @ 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | ["Apache-2.0"] | count: 2 | 2022-01-19T09:36:42.000Z to 2022-02-23T07:16:02.000Z
max_forks: examples/wmt/tools/scorer/nlm.py | JiangtaoFeng/ParaGen @ 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | ["Apache-2.0"] | count: 6 | 2022-01-19T09:28:53.000Z to 2022-03-10T10:20:08.000Z
content:
# Before running this command, you should first run:
# pip install fairseq
# pip install fastBPE
# wget https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.gz
# tar zxvf wmt19.en.tar.gz
import argparse
from itertools import islice
import numpy as np
from fairseq.models.transformer_lm import TransformerLanguageModel
parser = argparse.ArgumentParser()
parser.add_argument('--hypo_filename', metavar='N', type=str, help='hypo_filename')
parser.add_argument('--out_filename', metavar='N', type=str, help='out_filename')
# parser.add_argument('--num_candidates', type=int, help="num_candidates")
args, unknown = parser.parse_known_args()
en_lm = TransformerLanguageModel.from_pretrained('wmt19.en', 'model.pt', tokenizer='moses', bpe='fastbpe')
en_lm.cuda()
num_processed = 0
ppl = []
batch_num = 1000
with open(args.hypo_filename, 'r') as f, open(args.out_filename, 'w') as out:
while True:
n_lines = list(map(lambda x: x.strip(), islice(f, batch_num)))
if len(n_lines) == 0:
break
for ele in en_lm.score(n_lines, beam=1):
ppl.append(float(ele['positional_scores'].mean().neg().exp().item()))
num_processed += batch_num
print(f"Processed {num_processed}")
ppl = np.array(ppl)
ppl = np.nan_to_num(ppl, nan=np.nanmax(ppl))
# scores = 1 - ppl/ppl.max()
# for ele in zip(ppl.tolist(), scores.tolist()):
# out.write(f"{np.log(ele[0])}, {ele[0]}, {ele[1]}\n")
ppl = np.array(ppl)
for ele in ppl.tolist():
out.write(f"{np.log(ele)}\n")
avg_line_length: 36.116279 | max_line_length: 106 | alphanum_fraction: 0.676755
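
The scoring loop above streams the hypothesis file in fixed-size chunks with itertools.islice instead of reading it whole; note also that it advances num_processed by the full batch_num even when the final batch is short, so the progress log can overshoot. The chunking pattern in isolation (the file name and process() are stand-ins):

```python
from itertools import islice

def process(lines):
    print(f"scored {len(lines)} hypotheses")  # stand-in for en_lm.score(lines, beam=1)

with open("hypotheses.txt") as f:  # hypothetical input file
    while True:
        batch = [line.strip() for line in islice(f, 1000)]
        if not batch:  # islice yields nothing once the file is exhausted
            break
        process(batch)
```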

hexsha: f5accc4b43ec1556256e37986ed9a579a786c19a | size: 2,742 | ext: py | lang: Python
max_stars: aioli_openapi/service.py | jimorie/aioli-openapi @ 5a5ea6471d332adc8361ad39af7421e4686811fd | ["MIT"] | count: null | dates: null
max_issues: aioli_openapi/service.py | jimorie/aioli-openapi @ 5a5ea6471d332adc8361ad39af7421e4686811fd | ["MIT"] | count: null | dates: null
max_forks: aioli_openapi/service.py | jimorie/aioli-openapi @ 5a5ea6471d332adc8361ad39af7421e4686811fd | ["MIT"] | count: null | dates: null
content:
import warnings
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from aioli.service import BaseService
from aioli.controller import BaseHttpController
from aioli.exceptions import NoMatchFound
avg_line_length: 31.159091 | max_line_length: 106 | alphanum_fraction: 0.496718

hexsha: f5ae655bb41bdfdac3cd957f9a322f3eb321c3ad | size: 124 | ext: py | lang: Python
max_stars: wrangle_scripts/wrangle_data.py | es-g/dash @ 443b04593e66f7f2dcea325937eee4683f4c7a13 | ["MIT"] | count: null | dates: null
max_issues: wrangle_scripts/wrangle_data.py | es-g/dash @ 443b04593e66f7f2dcea325937eee4683f4c7a13 | ["MIT"] | count: null | dates: null
max_forks: wrangle_scripts/wrangle_data.py | es-g/dash @ 443b04593e66f7f2dcea325937eee4683f4c7a13 | ["MIT"] | count: null | dates: null
content:
import pandas as pd
import plotly.graph_objs as go
avg_line_length: 12.4 | max_line_length: 37 | alphanum_fraction: 0.685484

hexsha: f5ae7c5fd10eb5c3a55627538569669fa5235f04 | size: 399 | ext: py | lang: Python
max_stars: cpdb/popup/factories.py | invinst/CPDBv2_backend @ b4e96d620ff7a437500f525f7e911651e4a18ef9 | ["Apache-2.0"] | count: 25 | 2018-07-20T22:31:40.000Z to 2021-07-15T16:58:41.000Z
max_issues: cpdb/popup/factories.py | invinst/CPDBv2_backend @ b4e96d620ff7a437500f525f7e911651e4a18ef9 | ["Apache-2.0"] | count: 13 | 2018-06-18T23:08:47.000Z to 2022-02-10T07:38:25.000Z
max_forks: cpdb/popup/factories.py | invinst/CPDBv2_backend @ b4e96d620ff7a437500f525f7e911651e4a18ef9 | ["Apache-2.0"] | count: 6 | 2018-05-17T21:59:43.000Z to 2020-11-17T00:30:26.000Z
content:
import factory
from faker import Faker
from popup.models import Popup
fake = Faker()
avg_line_length: 23.470588 | max_line_length: 55 | alphanum_fraction: 0.719298

hexsha: f5afafed15f47453d454c043799fdd7a4422ab1b | size: 1,863 | ext: py | lang: Python
max_stars: src_old/tests/delete_migrations.py | rishikesh67/django-tenant-oracle-schemas @ 918a64e842b678fc506eadbb4d7e51b0b38ab0a2 | ["MIT"] | count: null | dates: null
max_issues: src_old/tests/delete_migrations.py | rishikesh67/django-tenant-oracle-schemas @ 918a64e842b678fc506eadbb4d7e51b0b38ab0a2 | ["MIT"] | count: 8 | 2019-12-04T23:26:11.000Z to 2022-02-10T09:42:18.000Z
max_forks: src/tests/delete_migrations.py | rishikesh67/django-tenant-oracle-schemas @ 918a64e842b678fc506eadbb4d7e51b0b38ab0a2 | ["MIT"] | count: 2 | 2019-06-26T05:31:16.000Z to 2019-07-01T12:22:50.000Z
content:
import os
import glob
import shutil
import logging
# logging.basicConfig(level=logging.DEBUG)
# DEBUG:root:Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/tenants/models.py
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# 2019-06-24 16:19:29,898 Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG, datefmt='%d/%m/%Y %H:%M:%S %p')
# 24/06/2019 04:23:31 PM Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG, datefmt='[%d/%m/%Y %H:%M:%S %p] =>')
# 24/06/2019 16:24:02 PM Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
avg_line_length: 38.8125 | max_line_length: 139 | alphanum_fraction: 0.7343

hexsha: f5b0b5d5e4ce7c8e9669a43f27a5226a60590d4f | size: 6,075 | ext: py | lang: Python
max_stars: qa2nli/converters/processors.py | nli-for-qa/conversion @ 588de7fbbcdeb9698fe888b6e3ece7dfadf25238 | ["MIT"] | count: null | dates: null
max_issues: qa2nli/converters/processors.py | nli-for-qa/conversion @ 588de7fbbcdeb9698fe888b6e3ece7dfadf25238 | ["MIT"] | count: null | dates: null
max_forks: qa2nli/converters/processors.py | nli-for-qa/conversion @ 588de7fbbcdeb9698fe888b6e3ece7dfadf25238 | ["MIT"] | count: 1 | 2021-07-04T01:59:56.000Z to 2021-07-04T01:59:56.000Z
content:
from typing import Callable, List, Union, Optional, Dict, Tuple
import re
import spacy
import logging
import math
from enum import Enum
logger = logging.getLogger(__name__)
Preprocessor = PreprocessorBase
dots = re.compile(r"[\.\'\"\?, ]{2,}[\w ]*")
avg_line_length: 31.806283 | max_line_length: 82 | alphanum_fraction: 0.597366

hexsha: f5b0c54a48711381cd579c3094b7c9b18f185760 | size: 2,106 | ext: py | lang: Python
max_stars: trphysx/data_utils/dataset_cylinder.py | zabaras/transformer-physx @ eb28d09957641cc594b3e5acf4ace2e4dc193584 | ["MIT"] | count: 33 | 2020-10-15T06:43:36.000Z to 2022-03-24T10:46:12.000Z
max_issues: trphysx/data_utils/dataset_cylinder.py | zabaras/transformer-physx @ eb28d09957641cc594b3e5acf4ace2e4dc193584 | ["MIT"] | count: 2 | 2021-05-18T14:31:38.000Z to 2021-07-30T18:18:50.000Z
max_forks: trphysx/data_utils/dataset_cylinder.py | zabaras/transformer-physx @ eb28d09957641cc594b3e5acf4ace2e4dc193584 | ["MIT"] | count: 6 | 2020-12-01T05:54:01.000Z to 2022-03-25T21:22:09.000Z
content:
"""
=====
Distributed by: Notre Dame SCAI Lab (MIT License)
- Associated publication:
url: https://arxiv.org/abs/2010.03957
doi:
github: https://github.com/zabaras/transformer-physx
=====
"""
import logging
import h5py
import torch
from .dataset_phys import PhysicalDataset
from ..embedding.embedding_model import EmbeddingModel
logger = logging.getLogger(__name__)
avg_line_length: 37.607143 | max_line_length: 121 | alphanum_fraction: 0.61396

hexsha: f5b4beb61d529163a339e65d180ea7a983c8e73d | size: 359 | ext: py | lang: Python
max_stars: HLTrigger/Configuration/python/HLT_75e33/paths/L1T_SingleTkMuon_22_cfi.py | PKUfudawei/cmssw @ 8fbb5ce74398269c8a32956d7c7943766770c093 | ["Apache-2.0"] | count: 1 | 2021-11-30T16:24:46.000Z to 2021-11-30T16:24:46.000Z
max_issues: HLTrigger/Configuration/python/HLT_75e33/paths/L1T_SingleTkMuon_22_cfi.py | PKUfudawei/cmssw @ 8fbb5ce74398269c8a32956d7c7943766770c093 | ["Apache-2.0"] | count: 4 | 2021-11-29T13:57:56.000Z to 2022-03-29T06:28:36.000Z
max_forks: HLTrigger/Configuration/python/HLT_75e33/paths/L1T_SingleTkMuon_22_cfi.py | PKUfudawei/cmssw @ 8fbb5ce74398269c8a32956d7c7943766770c093 | ["Apache-2.0"] | count: 1 | 2021-11-30T16:16:05.000Z to 2021-11-30T16:16:05.000Z
content:
import FWCore.ParameterSet.Config as cms
#from ..modules.hltL1TkMuons_cfi import *
from ..modules.hltL1TkSingleMuFiltered22_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *
L1T_SingleTkMuon_22 = cms.Path(
HLTBeginSequence +
# hltL1TkMuons +
hltL1TkSingleMuFiltered22 +
HLTEndSequence
)
avg_line_length: 25.642857 | max_line_length: 53 | alphanum_fraction: 0.788301

hexsha: f5b575448dfd3070de7e8cc30de61a51b143522f | size: 927 | ext: py | lang: Python
max_stars: strategies/forest.py | aladics/DeepBugHunter @ 564f2417eafc50e99de60d5d6c0a1b4193d1bf8b | ["Apache-2.0"] | count: 6 | 2019-03-01T13:17:09.000Z to 2022-03-07T04:07:04.000Z
max_issues: strategies/forest.py | aladics/DeepBugHunter @ 564f2417eafc50e99de60d5d6c0a1b4193d1bf8b | ["Apache-2.0"] | count: null | dates: null
max_forks: strategies/forest.py | aladics/DeepBugHunter @ 564f2417eafc50e99de60d5d6c0a1b4193d1bf8b | ["Apache-2.0"] | count: 2 | 2020-08-02T07:36:00.000Z to 2021-01-13T15:04:00.000Z
content:
import os
import math
import argparse
import dbh_util as util
from sklearn.ensemble import RandomForestClassifier
parser = argparse.ArgumentParser()
parser.add_argument('--n-estimators', type=int, default=10, help='The number of trees in the forest')
parser.add_argument('--max-depth', type=int, default=5, help='Max decision tree leaf node depth')
parser.add_argument('--criterion', default='gini', help='Split quality criterion, "gini" or "entropy"')
#
# Random Forest approach
#
avg_line_length: 34.333333 | max_line_length: 103 | alphanum_fraction: 0.73247
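
The training body of forest.py is truncated after the argparse setup; a hedged sketch of how flags like these conventionally feed RandomForestClassifier (plain argparse here, since dbh_util is not available in this sample):

```python
# Parse the flags defined above and build the classifier they describe.
args, _ = parser.parse_known_args(["--n-estimators", "20", "--max-depth", "8"])
clf = RandomForestClassifier(
    n_estimators=args.n_estimators,  # 20
    max_depth=args.max_depth,        # 8
    criterion=args.criterion,        # "gini" by default
)
```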

hexsha: f5b7476abd3046a860b7d297b7e32e4ae0dcc3db | size: 9,476 | ext: py | lang: Python
max_stars: vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py | openstack/vitrage-tempest-plugin @ 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | ["Apache-2.0"] | count: 6 | 2018-08-02T12:11:09.000Z to 2019-03-05T11:45:09.000Z
max_issues: vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py | openstack/vitrage-tempest-plugin @ 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | ["Apache-2.0"] | count: null | dates: null
max_forks: vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py | openstack/vitrage-tempest-plugin @ 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | ["Apache-2.0"] | count: 1 | 2018-08-22T12:29:54.000Z to 2018-08-22T12:29:54.000Z
content:
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from vitrage_tempest_plugin.tests.base import IsEmpty
from vitrage_tempest_plugin.tests.common.constants import DOCTOR_DATASOURCE
from vitrage_tempest_plugin.tests.common.constants import EntityCategory
from vitrage_tempest_plugin.tests.common.constants import VertexProperties \
as VProps
from vitrage_tempest_plugin.tests.common.constants import VITRAGE_DATASOURCE
from vitrage_tempest_plugin.tests.common import general_utils as g_utils
from vitrage_tempest_plugin.tests.common.tempest_clients import TempestClients
from vitrage_tempest_plugin.tests.common import vitrage_utils as v_utils
from vitrage_tempest_plugin.tests.e2e.test_actions_base import TestActionsBase
from vitrage_tempest_plugin.tests import utils
LOG = logging.getLogger(__name__)
TRIGGER_ALARM_1 = 'e2e.test_overlapping_actions.trigger.alarm1'
TRIGGER_ALARM_2 = 'e2e.test_overlapping_actions.trigger.alarm2'
TRIGGER_ALARM_3 = 'e2e.test_overlapping_actions.trigger.alarm3'
TRIGGER_ALARM_4 = 'e2e.test_overlapping_actions.trigger.alarm4'
DEDUCED = 'e2e.test_overlapping_actions.deduced.alarm'
TRIGGER_ALARM_1_PROPS = {
VProps.NAME: TRIGGER_ALARM_1,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: DOCTOR_DATASOURCE,
}
TRIGGER_ALARM_2_PROPS = {
VProps.NAME: TRIGGER_ALARM_2,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: DOCTOR_DATASOURCE,
}
DEDUCED_PROPS = {
VProps.NAME: DEDUCED,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE,
}
avg_line_length: 40.495726 | max_line_length: 78 | alphanum_fraction: 0.646264

hexsha: f5b80f86d6e5672de1791e2d08c1fbaf96195a02 | size: 4,137 | ext: py | lang: Python
max_stars: clone_tests/clone_compilation_errors.py | dcz-purism/glib @ eccd097166cdf7dfea9be17869868d45f8ef4ef6 | ["MIT-0", "MIT"] | count: null | dates: null
max_issues: clone_tests/clone_compilation_errors.py | dcz-purism/glib @ eccd097166cdf7dfea9be17869868d45f8ef4ef6 | ["MIT-0", "MIT"] | count: null | dates: null
max_forks: clone_tests/clone_compilation_errors.py | dcz-purism/glib @ eccd097166cdf7dfea9be17869868d45f8ef4ef6 | ["MIT-0", "MIT"] | count: null | dates: null
content:
import json
import os
import subprocess
import sys
TEST_FILENAME = "tmp_py_file"
TEST_FOLDER = "clone_tests"
TESTS = [
("clone!( => move || {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|a, b| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(@strong self => move |x| {})",
"Can't use `self` as variable name. Try storing it in a temporary variable or rename it using `as`."),
("clone!(@strong self.v => move |x| {})",
"Field accesses are not allowed as is, you must rename it!"),
("clone!(@weak v => @default-return false, || {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => @default-return false, |bla| {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => default-return false, move || {})",
"Missing `@` before `default-return`"),
("clone!(@weak v => @default-return false move || {})",
"Missing comma after `@default-return`'s value"),
("clone!(@yolo v => move || {})",
"Unknown keyword, only `weak` and `strong` are allowed"),
("clone!(v => move || {})",
"You need to specify if this is a weak or a strong clone."),
]
if __name__ == "__main__":
sys.exit(run_tests())
avg_line_length: 35.358974 | max_line_length: 117 | alphanum_fraction: 0.578922

hexsha: f5b9371efb3fb18aace487077f47abfd7957e4b2 | size: 2,437 | ext: py | lang: Python
max_stars: tests/test_tags.py | wbcsmarteezgithub/django-snakeoil @ ae1a8dab9e14194e48963101ff3349f45aee0ccf | ["BSD-2-Clause"] | count: 1 | 2020-07-03T15:52:25.000Z to 2020-07-03T15:52:25.000Z
max_issues: tests/test_tags.py | wbcsmarteezgithub/django-snakeoil @ ae1a8dab9e14194e48963101ff3349f45aee0ccf | ["BSD-2-Clause"] | count: null | dates: null
max_forks: tests/test_tags.py | wbcsmarteezgithub/django-snakeoil @ ae1a8dab9e14194e48963101ff3349f45aee0ccf | ["BSD-2-Clause"] | count: null | dates: null
content:
from __future__ import unicode_literals
from django.http import HttpRequest
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase
from snakeoil.models import SeoUrl
from .models import TestModel
avg_line_length: 29.719512 | max_line_length: 66 | alphanum_fraction: 0.531801

hexsha: f5b9906a08803c2fec8e92b95456e8a8ee69c95c | size: 50 | ext: py | lang: Python
max_stars: src/runner/__init__.py | Tung-I/nips2019_template @ a1fcf35b7633d192d2706a533731cb8c457ac230 | ["MIT"] | count: 11 | 2020-08-09T08:08:56.000Z to 2022-01-18T14:25:22.000Z
max_issues: src/runner/__init__.py | Tung-I/nips2019_template @ a1fcf35b7633d192d2706a533731cb8c457ac230 | ["MIT"] | count: 2 | 2021-09-13T09:48:41.000Z to 2021-11-08T14:20:58.000Z
max_forks: src/runner/__init__.py | Tung-I/nips2019_template @ a1fcf35b7633d192d2706a533731cb8c457ac230 | ["MIT"] | count: 4 | 2020-08-30T14:13:35.000Z to 2021-09-14T09:26:55.000Z
content:
from .trainers import *
from .predictors import *
avg_line_length: 16.666667 | max_line_length: 25 | alphanum_fraction: 0.76

hexsha: f5ba98b5a8a467c1237f20ea32bee34cf54cde58 | size: 420 | ext: py | lang: Python
max_stars: test/nn/conv/test_gravnet_conv.py | shrey-bansal/pytorch_geometric @ 17108a08066b0a73530544d01719b186f2625ef2 | ["MIT"] | count: 2 | 2020-09-08T15:22:08.000Z to 2020-09-08T15:22:09.000Z
max_issues: test/nn/conv/test_gravnet_conv.py | shrey-bansal/pytorch_geometric @ 17108a08066b0a73530544d01719b186f2625ef2 | ["MIT"] | count: null | dates: null
max_forks: test/nn/conv/test_gravnet_conv.py | shrey-bansal/pytorch_geometric @ 17108a08066b0a73530544d01719b186f2625ef2 | ["MIT"] | count: 1 | 2021-07-06T06:50:21.000Z to 2021-07-06T06:50:21.000Z
content:
import torch
from torch_geometric.nn import GravNetConv
avg_line_length: 32.307692 | max_line_length: 69 | alphanum_fraction: 0.688095

hexsha: f5baf25c3fc1ee4bca1c0e0df333ed41bd65f476 | size: 2,216 | ext: py | lang: Python
max_stars: base/CrossPlotter.py | pulsatrixwx/PulsatrixWx @ aae6ac36e2460dcf7f4a592d709139cd0d6a2e91 | ["MIT"] | count: 3 | 2016-03-27T00:21:46.000Z to 2018-06-01T09:20:57.000Z
max_issues: base/CrossPlotter.py | pulsatrixwx/PulsatrixWx @ aae6ac36e2460dcf7f4a592d709139cd0d6a2e91 | ["MIT"] | count: null | dates: null
max_forks: base/CrossPlotter.py | pulsatrixwx/PulsatrixWx @ aae6ac36e2460dcf7f4a592d709139cd0d6a2e91 | ["MIT"] | count: null | dates: null
content:
from datetime import datetime
from hootpy import HootPy
if __name__ == "__main__":
cfg = {
'forecast_hours':[0, 3, 6, 9, 12],
'product_title':"NAM Forecast Cross Section KDRT-KGRB",
'image_file_name':"nam_fcross_KDRT-KGRB_f%02d.png"
}
hpc = CrossPlotter(cfg)
hpc.loadData()
hpc.plot()
avg_line_length: 28.410256 | max_line_length: 108 | alphanum_fraction: 0.581679

hexsha: f5bb1ebe52102d71c8810bac844699880019ddf3 | size: 3,072 | ext: py | lang: Python
max_stars: management/commands/syncldap.py | LUH-CHI/chiffee @ 78ec85d36a6c757e5f56113089f1b56fdb0ed494 | ["MIT"] | count: 1 | 2018-03-22T09:53:06.000Z to 2018-03-22T09:53:06.000Z
max_issues: management/commands/syncldap.py | LUH-CHI/chiffee @ 78ec85d36a6c757e5f56113089f1b56fdb0ed494 | ["MIT"] | count: 4 | 2019-04-01T08:44:40.000Z to 2020-02-07T17:44:16.000Z
max_forks: management/commands/syncldap.py | LUH-CHI/chiffee @ 78ec85d36a6c757e5f56113089f1b56fdb0ed494 | ["MIT"] | count: 4 | 2018-05-04T12:01:50.000Z to 2019-10-11T09:47:33.000Z
content:
import logging
import ldap
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django_auth_ldap.backend import LDAPBackend
from chiffee.models import User
logger = logging.getLogger('syncldap')
# This command synchronizes local database with the LDAP server.
# New LDAP user -> new user in the local database.
# Deleted LDAP user -> local user is set to inactive.
avg_line_length: 36.571429 | max_line_length: 79 | alphanum_fraction: 0.595378
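
The command body is truncated after these comments. Under the semantics they describe, the core loop might look like the sketch below; django-auth-ldap's LDAPBackend.populate_user returns None when a username can no longer be found in LDAP, but everything else here (queryset, logging) is an assumption, not the project's code:

```python
# Hypothetical core of Command.handle(): deactivate users gone from LDAP.
backend = LDAPBackend()
for user in User.objects.filter(is_active=True):
    if backend.populate_user(user.username) is None:  # not found in LDAP
        logger.info('Deactivating %s', user.username)
        user.is_active = False
        user.save()
```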

hexsha: f5bc7050656c4c3afee2238a72f86661143054d5 | size: 598 | ext: py | lang: Python
max_stars: pysal/spreg/__init__.py | cubensys/pysal @ 8d50990f6e6603ba79ae1a887a20a1e3a0734e51 | ["MIT", "BSD-3-Clause"] | count: null | dates: null
max_issues: pysal/spreg/__init__.py | cubensys/pysal @ 8d50990f6e6603ba79ae1a887a20a1e3a0734e51 | ["MIT", "BSD-3-Clause"] | count: null | dates: null
max_forks: pysal/spreg/__init__.py | cubensys/pysal @ 8d50990f6e6603ba79ae1a887a20a1e3a0734e51 | ["MIT", "BSD-3-Clause"] | count: 1 | 2021-07-19T01:46:17.000Z to 2021-07-19T01:46:17.000Z
content:
from ols import *
from diagnostics import *
from diagnostics_sp import *
from user_output import *
from twosls import *
from twosls_sp import *
from error_sp import *
from error_sp_het import *
from error_sp_hom import *
from ols_regimes import *
from twosls_regimes import *
from twosls_sp_regimes import *
from error_sp_regimes import *
from error_sp_het_regimes import *
from error_sp_hom_regimes import *
from probit import *
from ml_lag import *
from ml_lag_regimes import *
from ml_error import *
from ml_error_regimes import *
from sur import *
from sur_error import *
from sur_lag import *
avg_line_length: 24.916667 | max_line_length: 34 | alphanum_fraction: 0.807692

hexsha: f5bdaf65264833d8c298cbab96f3a7c910693f18 | size: 209 | ext: py | lang: Python
max_stars: tests/conftest.py | lambertsbennett/Encountertk @ 708aedb38cb1689da8d2f39c68bd8694c64a79da | ["MIT"] | count: null | dates: null
max_issues: tests/conftest.py | lambertsbennett/Encountertk @ 708aedb38cb1689da8d2f39c68bd8694c64a79da | ["MIT"] | count: null | dates: null
max_forks: tests/conftest.py | lambertsbennett/Encountertk @ 708aedb38cb1689da8d2f39c68bd8694c64a79da | ["MIT"] | count: null | dates: null
content:
from pytest import fixture
from encountertk.e_model import EncounterModel, ps_encounter, mean_vol_encountered
avg_line_length: 26.125 | max_line_length: 82 | alphanum_fraction: 0.789474

hexsha: f5beb267f6635aef6117ff273b49cdca310125ca | size: 367 | ext: py | lang: Python
max_stars: jp.atcoder/abc045/abc045_b/8983851.py | kagemeka/atcoder-submissions @ 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | count: 1 | 2022-02-09T03:06:25.000Z to 2022-02-09T03:06:25.000Z
max_issues: jp.atcoder/abc045/abc045_b/8983851.py | kagemeka/atcoder-submissions @ 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | count: 1 | 2022-02-05T22:53:18.000Z to 2022-02-09T01:29:30.000Z
max_forks: jp.atcoder/abc045/abc045_b/8983851.py | kagemeka/atcoder-submissions @ 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | count: null | dates: null
content:
import sys
from collections import deque
a, b, c = sys.stdin.read().split()
if __name__ == "__main__":
ans = main()
print(ans)
avg_line_length: 17.47619 | max_line_length: 69 | alphanum_fraction: 0.46594

hexsha: f5bed273a043f28510a7c31520baff8cb6ddab43 | size: 16,504 | ext: py | lang: Python
max_stars: src/pipelines/azureml/lightgbm_training.py | microsoft/lightgbm-benchmark @ 286668d698d9d166857f924ecb775d5de224d489 | ["MIT"] | count: 13 | 2021-08-20T01:03:51.000Z to 2022-02-12T05:34:46.000Z
max_issues: src/pipelines/azureml/lightgbm_training.py | microsoft/lightgbm-benchmark @ 286668d698d9d166857f924ecb775d5de224d489 | ["MIT"] | count: 199 | 2021-08-21T21:18:53.000Z to 2022-03-27T23:08:44.000Z
max_forks: src/pipelines/azureml/lightgbm_training.py | microsoft/lightgbm-benchmark @ 286668d698d9d166857f924ecb775d5de224d489 | ["MIT"] | count: 4 | 2021-08-20T06:53:26.000Z to 2022-01-24T22:22:39.000Z
content:
"""
Runs LightGBM using distributed (mpi) training.
to execute:
> python src/pipelines/azureml/lightgbm_training.py --exp-config conf/experiments/lightgbm_training/cpu.yaml
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
import argparse
# config management
from dataclasses import dataclass
from omegaconf import OmegaConf, MISSING
from typing import Optional, Any, List
# AzureML
from azure.ml.component import Component
from azure.ml.component import dsl
from azure.ml.component.environment import Docker
# when running this script directly, needed to import common
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
if SCRIPTS_SOURCES_ROOT not in sys.path:
logging.info(f"Adding {SCRIPTS_SOURCES_ROOT} to path")
sys.path.append(str(SCRIPTS_SOURCES_ROOT))
from common.tasks import training_task, training_variant
from common.sweep import SweepParameterParser
from common.aml import load_dataset_from_data_input_spec
from common.aml import apply_sweep_settings
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit,
COMPONENTS_ROOT
)
### CONFIG DATACLASS ###
# Step 1 : to configure your pipeline, add all your fields inside a
# properly defined dataclass, pipeline_cli_main will figure out how
# to read that config from a given yaml file + hydra override commands
### PIPELINE COMPONENTS ###
# Step 2 : your pipeline consists in assembling components
# load those components from local yaml specifications
# use COMPONENTS_ROOT as base folder
lightgbm_train_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "spec.yaml"))
lightgbm_train_sweep_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "sweep_spec.yaml"))
partition_data_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "partition_data", "spec.yaml"))
lightgbm_data2bin_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "lightgbm_data2bin", "spec.yaml"))
### PIPELINE SPECIFIC CODE ###
def process_sweep_parameters(params_dict, sweep_algorithm):
"""Parses config and spots sweepable paraneters
Args:
params_dict (dict): configuration object (see get_config_class())
sweep_algorithm (str): random, grid, bayesian
Returns:
tunable_params (dict)
"""
# the class below automates parsing of sweepable parameters
sweep_param_parser = SweepParameterParser(
tunable_parameters=[
# those are keys and their default values
"num_iterations",
"num_leaves",
"min_data_in_leaf",
"learning_rate",
"max_bin",
"feature_fraction"
],
cli_prefix=None, # this is not argparse
parameter_sampling=sweep_algorithm
)
# provide config as a dictionary to the parser
sweep_parameters = {
"num_iterations": params_dict['num_iterations'],
"num_leaves": params_dict['num_leaves'],
"min_data_in_leaf": params_dict['min_data_in_leaf'],
"learning_rate": params_dict['learning_rate'],
"max_bin": params_dict['max_bin'],
"feature_fraction": params_dict['feature_fraction'],
}
# parser gonna parse
sweep_param_parser.parse_from_dict(sweep_parameters)
# and return params as we want them
tunable_params = sweep_param_parser.get_tunable_params()
fixed_params = sweep_param_parser.get_fixed_params()
# return dictionaries to fed as params into our pipeline
return tunable_params, fixed_params
### TRAINING PIPELINE ###
# Step 3: your pipeline consists in creating a python function
# decorated with @dsl.pipeline.
# You can create as many subgraphs as you want,
# but `pipeline_cli_main` will need one pipeline function
# taking a single config argument, not a pipeline parameter.
# Here you should create an instance of a pipeline function (using your custom config dataclass)
# creating an overall pipeline using pipeline_function for each task given
### MAIN BLOCK ###
# Step 4: implement main block using helper functions
def main():
# use parse helper function to get arguments from CLI
config = parse_pipeline_config(lightgbm_training_config)
# you'll need a workspace object to connect
workspace = azureml_connect(config)
# run the pipeline function with the given arguments
pipeline_instance = training_all_tasks(workspace, config)
# generate a nice markdown description
experiment_description="\n".join([
"Training on all specified tasks (see yaml below).",
"```yaml""",
"data_generation_config:",
OmegaConf.to_yaml(config.lightgbm_training_config),
"```"
])
# validate/submit the pipeline (if run.submit=True)
pipeline_submit(
workspace,
config,
pipeline_instance,
experiment_description=experiment_description
)
if __name__ == "__main__":
main()
avg_line_length: 39.961259 | max_line_length: 154 | alphanum_fraction: 0.673534
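
A hedged sketch of calling process_sweep_parameters from the pipeline above. The value strings are invented; the exact sweepable syntax ("choice(...)" vs. plain values) is decided by SweepParameterParser in common.sweep, which is not shown in this sample:

```python
# Hypothetical config: strings like "choice(...)" are meant to be swept,
# plain numbers stay fixed (exact syntax depends on common.sweep).
params = {
    "num_iterations": "choice(100, 200, 500)",
    "num_leaves": 31,
    "min_data_in_leaf": 20,
    "learning_rate": "choice(0.05, 0.1)",
    "max_bin": 255,
    "feature_fraction": 1.0,
}
tunable, fixed = process_sweep_parameters(params, sweep_algorithm="random")
```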

hexsha: f5bf990b580312d748c5534bd056ce7638df5fe7 | size: 3,319 | ext: py | lang: Python
max_stars: twinfield/metadata.py | zypp-io/twinfield @ b4306e79f514ae691584c2d47ce072a3619469b8 | ["Apache-2.0"] | count: 4 | 2020-12-20T23:02:33.000Z to 2022-01-13T19:40:13.000Z
max_issues: twinfield/metadata.py | zypp-io/twinfield @ b4306e79f514ae691584c2d47ce072a3619469b8 | ["Apache-2.0"] | count: 9 | 2020-12-18T07:27:07.000Z to 2022-02-17T09:23:51.000Z
max_forks: twinfield/metadata.py | zypp-io/twinfield @ b4306e79f514ae691584c2d47ce072a3619469b8 | ["Apache-2.0"] | count: null | dates: null
content:
from xml.etree import ElementTree as Et
import pandas as pd
import requests
from twinfield.core import Base
from twinfield.exceptions import ServerError
from twinfield.messages import METADATA_XML
avg_line_length: 27.658333 | max_line_length: 98 | alphanum_fraction: 0.577584

hexsha: f5c0bee32dd9418b4866fcc07b3ab0eea9c2d30b | size: 172 | ext: py | lang: Python
max_stars: sru_lm/load_data/__init__.py | Fast-LM-WordEvalRu/SRU-LM @ dd69d6c7b7b6c0164e83a874aee5e6f6766070d5 | ["Apache-2.0"] | count: null | dates: null
max_issues: sru_lm/load_data/__init__.py | Fast-LM-WordEvalRu/SRU-LM @ dd69d6c7b7b6c0164e83a874aee5e6f6766070d5 | ["Apache-2.0"] | count: null | dates: null
max_forks: sru_lm/load_data/__init__.py | Fast-LM-WordEvalRu/SRU-LM @ dd69d6c7b7b6c0164e83a874aee5e6f6766070d5 | ["Apache-2.0"] | count: 2 | 2019-11-06T13:07:30.000Z to 2020-02-04T11:21:19.000Z
content:
# Author: Artem Skiba
# Created: 20/01/2020
from .dataset import FastDataset
from .dataloader import get_dataloader
__all__ = [
'FastDataset', 'get_dataloader'
]
avg_line_length: 17.2 | max_line_length: 38 | alphanum_fraction: 0.726744

hexsha: f5c48e8b3a21158680b98773692e8c83b730ba87 | size: 5,053 | ext: py | lang: Python
max_stars: libs/complex2epz.py | ledummy/CoMPlEx @ f315df7a1b13cfcbdafd9879ff93a974f2e2c38b | ["MIT"] | count: null | dates: null
max_issues: libs/complex2epz.py | ledummy/CoMPlEx @ f315df7a1b13cfcbdafd9879ff93a974f2e2c38b | ["MIT"] | count: 1 | 2020-04-08T12:55:50.000Z to 2020-04-08T12:55:50.000Z
max_forks: libs/complex2epz.py | ledummy/CoMPlEx @ f315df7a1b13cfcbdafd9879ff93a974f2e2c38b | ["MIT"] | count: 1 | 2020-04-08T12:44:47.000Z to 2020-04-08T12:44:47.000Z
content:
INIT = 1
REST = ['START_MODSAFE',[0,0]]
NEUTRAL = ['START_MODSAFE',[1,INIT]]
FDBK = ['START_MODSAFE',[2,INIT]]
LIN = ['START_MODSAFE',[3,INIT]]
SIN = ['START_MODSAFE',[4,INIT]]
TYPES = {'Vconst':LIN,'Fconst':FDBK,'Zconst':NEUTRAL}
try:
import epz as tempEpz
import inspect
    _, _, keys, _ = inspect.getargspec(tempEpz.CMD.__init__)  # inspect the function object, don't call it
if 'tag' not in keys:
from libs.epz import epz as tempEpz
epz = tempEpz
except:
from libs.epz import epz
# N set the triggers. The triggers are, in order, adc (deflection), dac (z position), time
# 1 = used, 0 = not used
#Triggers
# K = set adc (deflection) stop trigger (Volts)
# L = set dac (z position) stop trigger (Volts)
# M = set time stop trigger in microseconds
# P = set the setpoint for the feedback (-1, +1)
# Q = set the proportional gain for the feedback (0.0 to 1.0)
# R = set the integral gain for the feedback (0.0 to 1.0)
# S = set the differential gain for the feedback (0.0 to 1.0)
# B = set DAC output (Volts)
# D = set the piezo speed (Volt/s)
# C = set the piezo speed sign
'''
SET_DACSTEP:D
SET_NUMT6TRIG:T
SET_TIMETRIG:M
SET_DAC_SOFT:B
SET_DAC_HARD:U
SET_TRIGGERS:N
SET_ZTRIG:L
SET_FTRIG:K
SET_TIM8PER:8
SET_SETPOINT:P
SET_PGAIN:Q
SET_IGAIN:R
SET_DGAIN:S
START_MODSAFE:O
SET_DACMODE:F
SET_TESTPIN:H
INIT_SPI2:I
SET_RAMPSIGN:C
SET_USECIRCBUFF:G
SET_MODEDBG:E
SET_DACTO0:J
SET_DAC_2OR4:A
SWITCH_SPI2:g
KILL:k
'''
avg_line_length: 23.723005 | max_line_length: 90 | alphanum_fraction: 0.660993
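
The trailing triple-quoted block in the file above is an unassigned name-to-opcode table for the serial protocol. Were it bound to a string (COMMAND_TABLE below is an assumption, shown abbreviated), it parses into a lookup dict in one line:

```python
COMMAND_TABLE = """SET_DACSTEP:D
SET_TIMETRIG:M
SET_SETPOINT:P
KILL:k"""  # abbreviated copy of the table in the file

# "NAME:opcode" lines -> {"SET_DACSTEP": "D", ...}
OPCODES = dict(line.split(":") for line in COMMAND_TABLE.strip().splitlines())
assert OPCODES["KILL"] == "k"
```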

hexsha: f5c4f96d849731c4a186b3fef06e21bef4391f32 | size: 1,177 | ext: py | lang: Python
max_stars: test/device/test_brakes.py | uOstar/barista @ ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1 | ["MIT"] | count: 4 | 2017-11-05T19:37:23.000Z to 2018-06-18T13:18:11.000Z
max_issues: test/device/test_brakes.py | uOstar/barista @ ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1 | ["MIT"] | count: 24 | 2017-11-05T19:22:08.000Z to 2018-06-14T13:50:39.000Z
max_forks: test/device/test_brakes.py | uorocketry/barista @ ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1 | ["MIT"] | count: 1 | 2022-03-25T04:01:25.000Z to 2022-03-25T04:01:25.000Z
content:
import pytest
from mock import patch
from app.device.brakes import Brakes
from app.utils.servo import Servo
from app.utils.exceptions import InvalidArguments
avg_line_length: 28.02381 | max_line_length: 75 | alphanum_fraction: 0.773152

hexsha: f5c957427e5b93fcfc4229d7e7efbe7a5cf8ce25 | size: 601 | ext: py | lang: Python
max_stars: 4 kyu/Most_frequently_used_words_in_a_text.py | jonathansnolan/Codewars @ 9d6a3fd10ffb2c61ae292961f384067cdede0470 | ["MIT"] | count: null | dates: null
max_issues: 4 kyu/Most_frequently_used_words_in_a_text.py | jonathansnolan/Codewars @ 9d6a3fd10ffb2c61ae292961f384067cdede0470 | ["MIT"] | count: null | dates: null
max_forks: 4 kyu/Most_frequently_used_words_in_a_text.py | jonathansnolan/Codewars @ 9d6a3fd10ffb2c61ae292961f384067cdede0470 | ["MIT"] | count: null | dates: null
content:
from collections import Counter
avg_line_length: 22.259259 | max_line_length: 75 | alphanum_fraction: 0.425957
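
Only the Counter import survives in this sample. The Codewars kata the path names ("Most frequently used words in a text") asks for the three most frequent words; one conventional Counter-based solution, offered as a sketch rather than the author's code:

```python
import re
from collections import Counter

def top_3_words(text):
    # Words are runs of letters/apostrophes; drop apostrophe-only tokens.
    words = re.findall(r"[a-z']+", text.lower())
    words = [w for w in words if any(c.isalpha() for c in w)]
    return [w for w, _ in Counter(words).most_common(3)]

assert top_3_words("a a a b b c") == ["a", "b", "c"]
```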

hexsha: f5cb0863a83b32aad95be43c48206bffad748391 | size: 33 | ext: py | lang: Python
max_stars: test/__init__.py | rbn920/robosync @ 1d430f64f6c7156920f92546770a1d2ddb558fea | ["MIT"] | count: null | dates: null
max_issues: test/__init__.py | rbn920/robosync @ 1d430f64f6c7156920f92546770a1d2ddb558fea | ["MIT"] | count: null | dates: null
max_forks: test/__init__.py | rbn920/robosync @ 1d430f64f6c7156920f92546770a1d2ddb558fea | ["MIT"] | count: null | dates: null
content:
'''Test package for robosync'''
avg_line_length: 16.5 | max_line_length: 32 | alphanum_fraction: 0.666667

hexsha: f5cc6aee2d43d9f8f6fc9d61aea78cd19c169feb | size: 4,921 | ext: py | lang: Python
max_stars: tadpole/template/app/lib/auth.py | echoyuanliang/pine @ 22175e6aea0ca9b02d6542677b27a690c1501c9c | ["MIT"] | count: 2 | 2017-12-02T07:02:31.000Z to 2020-10-13T02:20:18.000Z
max_issues: tadpole/template/app/lib/auth.py | echoyuanliang/pine @ 22175e6aea0ca9b02d6542677b27a690c1501c9c | ["MIT"] | count: null | dates: null
max_forks: tadpole/template/app/lib/auth.py | echoyuanliang/pine @ 22175e6aea0ca9b02d6542677b27a690c1501c9c | ["MIT"] | count: 1 | 2018-04-23T04:59:38.000Z to 2018-04-23T04:59:38.000Z
content:
#!/usr/bin/env python
# coding: utf-8
"""
create at 2017/11/22 by allen
"""
import re
from flask import request, session, current_app
from app.lib.constant import ResourceType
from app.models.auth import Resource, role_resource, Role, user_role, User
from app.lib.exceptions import AuthError, PermissionError
_auth_db_loader = AuthDbLoader()
_http_basic_auth = HttpBasicAuth(user_loader=_auth_db_loader.load_user)
avg_line_length: 30.190184 | max_line_length: 79 | alphanum_fraction: 0.65251

hexsha: f5d03f80ba9950414b41050d76a8ec9d43425ee6 | size: 656 | ext: py | lang: Python
max_stars: src/easy/plus_one_66.py | ahmet9cengiz/leetCode @ 9e9a61f059072d7791dd19706b7a3e0d0a446669 | ["MIT"] | count: null | dates: null
max_issues: src/easy/plus_one_66.py | ahmet9cengiz/leetCode @ 9e9a61f059072d7791dd19706b7a3e0d0a446669 | ["MIT"] | count: null | dates: null
max_forks: src/easy/plus_one_66.py | ahmet9cengiz/leetCode @ 9e9a61f059072d7791dd19706b7a3e0d0a446669 | ["MIT"] | count: null | dates: null
content:
if __name__ == '__main__':
s = Solution()
print(s.plus_one([9,9,9]))
avg_line_length: 24.296296 | max_line_length: 69 | alphanum_fraction: 0.464939

hexsha: f5d07d12c4b5747b9b1b9f630c617df1ba338e16 | size: 1,607 | ext: py | lang: Python
max_stars: timetracker/vms/test/models/test_client_admin_invite_model.py | comp523-jarvis/timetracker-web @ af638f0b3aab8a69a974bdb9a18118198488657c | ["Apache-2.0"] | count: 1 | 2019-04-09T16:46:53.000Z to 2019-04-09T16:46:53.000Z
max_issues: timetracker/vms/test/models/test_client_admin_invite_model.py | comp523-jarvis/timetracker-web @ af638f0b3aab8a69a974bdb9a18118198488657c | ["Apache-2.0"] | count: 105 | 2018-10-12T17:57:20.000Z to 2020-06-05T19:35:21.000Z
max_forks: timetracker/vms/test/models/test_client_admin_invite_model.py | comp523-jarvis/timetracker-web @ af638f0b3aab8a69a974bdb9a18118198488657c | ["Apache-2.0"] | count: 1 | 2019-04-11T14:43:42.000Z to 2019-04-11T14:43:42.000Z
content:
from django.conf import settings
from django.template.loader import render_to_string
from vms import models
def test_accept(client_admin_invite_factory, user_factory):
"""
Accepting the invitation should create a new client admin for the
user who accepts.
"""
invite = client_admin_invite_factory()
user = user_factory()
admin = invite.accept(user)
assert admin.client == invite.client
assert models.ClientAdminInvite.objects.count() == 0
def test_send(client_admin_invite_factory, request_factory, mailoutbox):
"""
Sending the invitation should send an email to the email address
attached to the invite.
"""
request = request_factory.get('/')
invite = client_admin_invite_factory()
invite.send(request)
context = {
'accept_url': f'{request.get_host()}{invite.accept_url}',
'client': invite.client,
}
expected_msg = render_to_string(
'vms/emails/client-admin-invite.txt',
context=context,
)
assert len(mailoutbox) == 1
msg = mailoutbox[0]
assert msg.body == expected_msg
assert msg.from_email == settings.DEFAULT_FROM_EMAIL
assert msg.subject == 'Client Administrator Invitation'
assert msg.to == [invite.email]
def test_string_conversion(client_admin_invite_factory):
"""
Converting an invite to a string should return a string containing
the email it was sent to and the linked client.
"""
invite = client_admin_invite_factory()
expected = f'Admin invite for {invite.email} from {invite.client}'
assert str(invite) == expected
avg_line_length: 27.706897 | max_line_length: 72 | alphanum_fraction: 0.701929

hexsha: f5d0bd552a2206b2e1b134ade80b6b88f2ce3b53 | size: 3,489 | ext: py | lang: Python
max_stars: _from_pydot/lambdas/dev/pyppeteer.py | owasp-sbot/pbx-gs-python-utils @ f448aa36c4448fc04d30c3a5b25640ea4d44a267 | ["Apache-2.0"] | count: 3 | 2018-12-14T15:43:46.000Z to 2019-04-25T07:44:58.000Z
max_issues: _from_pydot/lambdas/dev/pyppeteer.py | owasp-sbot/pbx-gs-python-utils @ f448aa36c4448fc04d30c3a5b25640ea4d44a267 | ["Apache-2.0"] | count: 1 | 2019-05-11T14:19:37.000Z to 2019-05-11T14:51:04.000Z
max_forks: _from_pydot/lambdas/dev/pyppeteer.py | owasp-sbot/pbx-gs-python-utils @ f448aa36c4448fc04d30c3a5b25640ea4d44a267 | ["Apache-2.0"] | count: 4 | 2018-12-27T04:54:14.000Z to 2019-05-11T14:07:47.000Z
content:
import base64
import os
import asyncio
from pbx_gs_python_utils.utils.Process import Process
from osbot_aws.Dependencies import load_dependency
avg_line_length: 67.096154 | max_line_length: 162 | alphanum_fraction: 0.509888

hexsha: f5d23a181d6fd76675487606efe26f43a22cb25e | size: 2,757 | ext: py | lang: Python
max_stars: filter_plugins/net_textfsm_parse.py | iamroddo/ansible_helpers @ 420b9d7a1bb637f52209aeeea4cd424d03cf4eef | ["Apache-2.0"] | count: 44 | 2017-05-19T19:55:39.000Z to 2022-02-08T17:21:22.000Z
max_issues: filter_plugins/net_textfsm_parse.py | iamroddo/ansible_helpers @ 420b9d7a1bb637f52209aeeea4cd424d03cf4eef | ["Apache-2.0"] | count: 2 | 2017-07-17T14:28:23.000Z to 2020-12-11T15:54:00.000Z
max_forks: filter_plugins/net_textfsm_parse.py | iamroddo/ansible_helpers @ 420b9d7a1bb637f52209aeeea4cd424d03cf4eef | ["Apache-2.0"] | count: 18 | 2017-07-27T07:58:34.000Z to 2021-06-06T04:06:33.000Z
content:
"""
Filter to convert results from network device show commands obtained from ios_command,
eos_command, et cetera to structured data using TextFSM templates.
"""
from __future__ import unicode_literals
from __future__ import print_function
import os
from textfsm.clitable import CliTableError
import textfsm.clitable as clitable
def get_template_dir():
"""Find and return the ntc-templates/templates dir."""
try:
template_dir = os.environ['NET_TEXTFSM']
index = os.path.join(template_dir, 'index')
if not os.path.isfile(index):
# Assume only base ./ntc-templates specified
template_dir = os.path.join(template_dir, 'templates')
except KeyError:
# Construct path ~/ntc-templates/templates
home_dir = os.path.expanduser("~")
template_dir = os.path.join(home_dir, 'ntc-templates', 'templates')
index = os.path.join(template_dir, 'index')
if not os.path.isdir(template_dir) or not os.path.isfile(index):
msg = """
Valid ntc-templates not found, please install https://github.com/networktocode/ntc-templates
and then set the NET_TEXTFSM environment variable to point to the ./ntc-templates/templates
directory."""
raise ValueError(msg)
return template_dir
def get_structured_data(raw_output, platform, command):
"""Convert raw CLI output to structured data using TextFSM template."""
template_dir = get_template_dir()
index_file = os.path.join(template_dir, 'index')
textfsm_obj = clitable.CliTable(index_file, template_dir)
attrs = {'Command': command, 'Platform': platform}
try:
# Parse output through template
textfsm_obj.ParseCmd(raw_output, attrs)
return clitable_to_dict(textfsm_obj)
except CliTableError:
return raw_output
def clitable_to_dict(cli_table):
"""Converts TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs
def net_textfsm_parse(output, platform, command):
"""Process config find interfaces using ip helper."""
try:
output = output['stdout'][0]
except (KeyError, IndexError, TypeError):
pass
return get_structured_data(output, platform, command)
if __name__ == "__main__":
# Test code
pass
avg_line_length: 32.821429 | max_line_length: 93 | alphanum_fraction: 0.696772
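
A hedged usage sketch of the filter above: it needs ntc-templates on disk (via NET_TEXTFSM or ~/ntc-templates/templates) and real device output; the show output here is abbreviated:

```python
# Ansible invokes this as a filter; standalone it behaves the same.
raw = {"stdout": ["Interface   IP-Address   ...   Protocol"]}  # abbreviated
parsed = net_textfsm_parse(raw, platform="cisco_ios",
                           command="show ip interface brief")
# -> list of dicts keyed by lowercased TextFSM headers, or the raw string
#    unchanged if no matching template is found.
```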

hexsha: f5d2d84344ef95aeed5c0f078a4e133508f0ccd9 | size: 5,705 | ext: py | lang: Python
max_stars: firebaseClient/firebaseClientGPIO.py | tabris2015/personCounter @ 0cd7f8698afefdd9e913a97820b9ff9c01752274 | ["MIT"] | count: null | dates: null
max_issues: firebaseClient/firebaseClientGPIO.py | tabris2015/personCounter @ 0cd7f8698afefdd9e913a97820b9ff9c01752274 | ["MIT"] | count: null | dates: null
max_forks: firebaseClient/firebaseClientGPIO.py | tabris2015/personCounter @ 0cd7f8698afefdd9e913a97820b9ff9c01752274 | ["MIT"] | count: null | dates: null
content:
#!/usr/bin/python
import threading
import Queue
import serial
import time
from datetime import datetime
from firebase import firebase
import sqlite3
from datetime import datetime, timedelta
from gpiozero import Button, LED
#///////////////////////////////////////////
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
#/////////////////////////////////////////////////
missed_events = []
DB_INTERVAL = 180
##### pin definitions
FAULT = LED(5)
FALLA = False
IN1 = 13
OUT1 = 6
IN2 = 26
OUT2 = 19
in1_button = Button(IN1, pull_up=False)
out1_button = Button(OUT1, pull_up=False)
in2_button = Button(IN2, pull_up=False)
out2_button = Button(OUT2, pull_up=False)
eventQueue = Queue.Queue()
####
connected = False
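# --- Sketches of the stripped helpers (assumptions, not the original code) ---
# The gpiozero event handlers and the periodic database flush were lost in
# extraction; these minimal stand-ins only queue events so the wiring in the
# main block below still works.
def record_event(sensor, direction):
    eventQueue.put((datetime.now(), sensor, direction))

def in1Event():
    record_event(1, 'in')

def out1Event():
    record_event(1, 'out')

def in2Event():
    record_event(2, 'in')

def out2Event():
    record_event(2, 'out')

def periodicDBInsert(key_path):
    # Hypothetical flush loop: drain the queue every DB_INTERVAL seconds.
    # The original presumably wrote the events to Firestore using the
    # credentials at key_path; here they are only buffered locally.
    while True:
        time.sleep(DB_INTERVAL)
        while not eventQueue.empty():
            missed_events.append(eventQueue.get())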
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='contador de personas')
parser.add_argument('-key', required=True, action='store',help='path to key for remote connection')
args = parser.parse_args()
keyPath = ""
if args.key != None:
keyPath = args.key
#first_event = False
dbTh = threading.Thread(target=periodicDBInsert, args=(keyPath,))
#dbTh = threading.Timer(5, periodicDBInsert, args=(db,))
dbTh.daemon = True
# -----
dbTh.start()
###
#firebase = firebase.FirebaseApplication(URL, authentication=authentication)
in1_button.when_pressed = in1Event
out1_button.when_pressed = out1Event
in2_button.when_pressed = in2Event
out2_button.when_pressed = out2Event
while True:
if not FALLA:
FAULT.on()
time.sleep(0.1)
FAULT.off()
time.sleep(0.9)
else:
FAULT.on()
time.sleep(1)
FAULT.on()
FAULT.on()
| 26.784038
| 103
| 0.540053
|
f5d40b58d32d09631a74deab03cacd263794a4ed
| 3,204
|
py
|
Python
|
look-for.py
|
barnesrobert/find-aws-resource-in-all-accounts
|
5f02aacca3ce3a28894d7d497c4158ed9b08c238
|
[
"Apache-2.0"
] | null | null | null |
look-for.py
|
barnesrobert/find-aws-resource-in-all-accounts
|
5f02aacca3ce3a28894d7d497c4158ed9b08c238
|
[
"Apache-2.0"
] | null | null | null |
look-for.py
|
barnesrobert/find-aws-resource-in-all-accounts
|
5f02aacca3ce3a28894d7d497c4158ed9b08c238
|
[
"Apache-2.0"
] | null | null | null |
#--------------------------------------------------------------------------------------------------
# Function: look-for
# Purpose: Loops through all AWS accounts and regions within an Organization to find a specific resource
# Inputs:
#
# {
# "view_only": "true|false",
# "regions": ["us-east-1", ...]
# }
#
# Leave the regions sections blank to apply to all regions
#
#--------------------------------------------------------------------------------------------------
import json
import boto3
import botocore
from botocore.exceptions import ClientError
from botocore.exceptions import EndpointConnectionError
sts_client = boto3.client('sts')
organizations_client = boto3.client('organizations')
#--------------------------------------------------------------------------------------------------
# Function handler
#--------------------------------------------------------------------------------------------------
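# Sketch of the stripped handler (assumptions throughout): enumerate every
# account in the Organization and hand each one to loop_through_account.
def lambda_handler(event, context):
    view_only = str(event.get('view_only', 'true')).lower() == 'true'
    regions = event.get('regions')
    if not regions:
        regions = [r['RegionName'] for r in
                   boto3.client('ec2').describe_regions()['Regions']]
    paginator = organizations_client.get_paginator('list_accounts')
    for page in paginator.paginate():
        for account in page['Accounts']:
            loop_through_account(account, regions, view_only)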
#--------------------------------------------------
# function: loop_through_account
#--------------------------------------------------
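# Sketch only (the role name and the example resource lookup are assumptions):
# assume a role in the member account, then inspect each requested region.
def loop_through_account(account, regions, view_only):
    creds = sts_client.assume_role(
        RoleArn='arn:aws:iam::%s:role/OrganizationAccountAccessRole' % account['Id'],
        RoleSessionName='look-for')['Credentials']
    for region in regions:
        ec2 = boto3.client('ec2', region_name=region,
                           aws_access_key_id=creds['AccessKeyId'],
                           aws_secret_access_key=creds['SecretAccessKey'],
                           aws_session_token=creds['SessionToken'])
        # Hypothetical example lookup: unattached EBS volumes
        volumes = ec2.describe_volumes(
            Filters=[{'Name': 'status', 'Values': ['available']}])['Volumes']
        print(account['Id'], region, len(volumes), 'unattached volumes')
        if not view_only:
            pass  # remediation/deletion would go here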
| 32.693878
| 105
| 0.542447
|
f5d6cff69b0e62527106143d8be0c05d4bcd4fe7
| 2,972
|
py
|
Python
|
opennem/spiders/aemo/monitoring.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 22
|
2020-06-30T05:27:21.000Z
|
2022-02-21T12:13:51.000Z
|
opennem/spiders/aemo/monitoring.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 71
|
2020-08-07T13:06:30.000Z
|
2022-03-15T06:44:49.000Z
|
opennem/spiders/aemo/monitoring.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 13
|
2020-06-30T03:28:32.000Z
|
2021-12-30T08:17:16.000Z
|
import logging
from typing import Any, Dict
from pydantic import ValidationError
from scrapy import Spider
from scrapy.http import Response
from opennem.pipelines.aemo.downloads import DownloadMonitorPipeline
from opennem.schema.aemo.downloads import AEMOFileDownloadSection
from opennem.utils.dates import parse_date
from opennem.utils.numbers import filesize_from_string
from opennem.utils.url import strip_query_string
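logger = logging.getLogger("opennem.spiders.aemo.monitoring")

class AEMOMonitorSpider(Spider):
    # Sketch of the stripped spider (assumptions throughout): the class name,
    # start URL, CSS selectors and AEMOFileDownloadSection field names are all
    # hypothetical; only the imported helpers are taken from the file itself.
    name = "au.aemo.downloads.monitor"
    start_urls = ["https://aemo.com.au/"]
    pipelines = set([DownloadMonitorPipeline])

    def parse(self, response: Response) -> Any:
        for link in response.css("a[href$='.zip'], a[href$='.csv']"):
            meta: Dict[str, Any] = {
                "url": strip_query_string(response.urljoin(link.attrib["href"])),
                "published_date": parse_date(link.attrib.get("data-date", "")),
                "file_size": filesize_from_string(link.css("::text").get() or ""),
            }
            try:
                yield AEMOFileDownloadSection(**meta)
            except ValidationError as err:
                logger.error("Invalid AEMO download section: %s", err)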
| 37.620253
| 175
| 0.657133
|
f5d87e21f9ec6f8ae018914ba1e9c0e382bc83dd
| 319
|
py
|
Python
|
python/13/servo.py
|
matsujirushi/raspi_parts_kouryaku
|
35cd6f34d21c5e3160636671175fa8d5aff2d4dc
|
[
"Apache-2.0"
] | 6
|
2022-03-05T02:36:57.000Z
|
2022-03-12T12:31:27.000Z
|
python/13/servo.py
|
matsujirushi/raspi_parts_kouryaku
|
35cd6f34d21c5e3160636671175fa8d5aff2d4dc
|
[
"Apache-2.0"
] | null | null | null |
python/13/servo.py
|
matsujirushi/raspi_parts_kouryaku
|
35cd6f34d21c5e3160636671175fa8d5aff2d4dc
|
[
"Apache-2.0"
] | null | null | null |
import wiringpi as pi
pi.wiringPiSetupGpio()
pi.pinMode(18, pi.PWM_OUTPUT)
pi.pwmSetMode(pi.PWM_MODE_MS)
pi.pwmSetClock(2)
pi.pwmSetRange(192000)
while True:
for i in list(range(-90, 90, 10)) + list(range(90, -90, -10)):
pi.pwmWrite(18, int(((i + 90) / 180 * (2.4 - 0.5) + 0.5) / 20 * 192000))
pi.delay(200)
| 26.583333
| 76
| 0.652038
|
f5d9d9ea4f3e787d1de8f24aa36d4dcbede900ec
| 2,549
|
py
|
Python
|
src/vswarm/object_detection/blob_detector.py
|
Faust-Wang/vswarm
|
d18ce643218c18ef1e762f40562104b2a0926ad7
|
[
"MIT"
] | 21
|
2021-03-03T10:51:46.000Z
|
2022-03-28T11:00:35.000Z
|
src/vswarm/object_detection/blob_detector.py
|
Faust-Wang/vswarm
|
d18ce643218c18ef1e762f40562104b2a0926ad7
|
[
"MIT"
] | 2
|
2021-07-21T07:57:16.000Z
|
2022-03-17T12:41:51.000Z
|
src/vswarm/object_detection/blob_detector.py
|
hvourtsis/vswarm
|
d18ce643218c18ef1e762f40562104b2a0926ad7
|
[
"MIT"
] | 8
|
2021-02-27T14:29:55.000Z
|
2022-01-05T19:40:38.000Z
|
import cv2 as cv
from geometry_msgs.msg import Pose2D
from vision_msgs.msg import (BoundingBox2D, Detection2D, Detection2DArray,
ObjectHypothesisWithPose)
THRESHOLD_MAX = 255
THRESHOLD = 240
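class BlobDetector(object):
    """Sketch of the stripped class (assumptions throughout): threshold the
    image, find contours, and wrap each blob in a vision_msgs Detection2D,
    which is what the imports above suggest. The method name is hypothetical.
    """

    def detect(self, image):
        _, mask = cv.threshold(image, THRESHOLD, THRESHOLD_MAX, cv.THRESH_BINARY)
        contours, _ = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        detections = Detection2DArray()
        for contour in contours:
            x, y, w, h = cv.boundingRect(contour)
            center = Pose2D(x=x + w / 2.0, y=y + h / 2.0, theta=0.0)
            bbox = BoundingBox2D(center=center, size_x=float(w), size_y=float(h))
            detection = Detection2D(bbox=bbox)
            detection.results.append(ObjectHypothesisWithPose(score=1.0))
            detections.detections.append(detection)
        return detections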
| 32.265823
| 85
| 0.59592
|
f5dc231bdf053f390dc67dc11fbefb6147ad20d2
| 188
|
py
|
Python
|
setup.py
|
wicrep/triplet-reid
|
251c24d828e223de75b45ae65aa3f38171f9676b
|
[
"MIT"
] | null | null | null |
setup.py
|
wicrep/triplet-reid
|
251c24d828e223de75b45ae65aa3f38171f9676b
|
[
"MIT"
] | null | null | null |
setup.py
|
wicrep/triplet-reid
|
251c24d828e223de75b45ae65aa3f38171f9676b
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name="triplet-reid",
version="0.1.0",
description="Triplet-based Person Re-Identification",
packages=find_packages(),
)
| 20.888889
| 57
| 0.712766
|
f5dc6d973bebdd28a311046ec3c5d189663906f8
| 530
|
py
|
Python
|
sentences.py
|
vanatteveldt/perspectives
|
6d537082b915ccde15031d94983bd2d575cdc380
|
[
"MIT"
] | null | null | null |
sentences.py
|
vanatteveldt/perspectives
|
6d537082b915ccde15031d94983bd2d575cdc380
|
[
"MIT"
] | null | null | null |
sentences.py
|
vanatteveldt/perspectives
|
6d537082b915ccde15031d94983bd2d575cdc380
|
[
"MIT"
] | null | null | null |
import csv
import sys
from KafNafParserPy import KafNafParser
from naflib import *
woorden = [r['original'] for r in csv.DictReader(open("klimaatwoorden.csv"))]
o = csv.writer(sys.stdout)
o.writerow(["file", "sentence", "term", "text"])
for fn in sys.argv[1:]:
naf = KafNafParser(fn)
for klimaterm in find_terms(naf, woorden):
sent = get_sentence(naf, klimaterm)
text = " ".join([get_word(naf, t) for t in get_terms_in_sentence(naf, sent)])
o.writerow([fn, sent, klimaterm.get_lemma(), text])
| 27.894737
| 85
| 0.677358
|
f5dd11fe9a9263410d61440cc6794ca854255416
| 1,127
|
py
|
Python
|
view/user/__init__.py
|
archever/flask-web
|
cd120f64deec31fd1a87285372abaa22fc379b9f
|
[
"MIT"
] | null | null | null |
view/user/__init__.py
|
archever/flask-web
|
cd120f64deec31fd1a87285372abaa22fc379b9f
|
[
"MIT"
] | null | null | null |
view/user/__init__.py
|
archever/flask-web
|
cd120f64deec31fd1a87285372abaa22fc379b9f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from flask import Blueprint, render_template, redirect
from controlers.user import UserCtr
from libs.login import login_user, logout_user, current_user
bp = Blueprint("user", __name__, url_prefix="/user")
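@bp.route("/logout")
def logout():
    # Sketch of a stripped view (assumption): the imported logout_user and
    # redirect strongly suggest a logout endpoint along these lines.
    logout_user()
    return redirect("/")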
| 23.978723
| 60
| 0.668146
|
f5deb3f2744fe175063b1c389f169973e74ce044
| 9,607
|
py
|
Python
|
recipes/Python/52275_sparse_dictionary_based_sparse_matrix/recipe-52275.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/52275_sparse_dictionary_based_sparse_matrix/recipe-52275.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/52275_sparse_dictionary_based_sparse_matrix/recipe-52275.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
#!/usr/bin/env python
import vector
import math, types, operator
"""
A sparse matrix class based on a dictionary, supporting matrix (dot)
product and a conjugate gradient solver.
In this version, the sparse class inherits from the dictionary; this
requires Python 2.2 or later.
"""
###############################################################################
def isSparse(x):
return hasattr(x,'__class__') and x.__class__ is sparse
def transp(a):
" transpose "
new = sparse({})
for ij in a:
new[(ij[1], ij[0])] = a[ij]
return new
def dotDot(y,a,x):
" double dot product y^+ *A*x "
if vector.isVector(y) and isSparse(a) and vector.isVector(x):
res = 0.
for ij in a.keys():
i,j = ij
res += y[i]*a[ij]*x[j]
return res
else:
print 'sparse::Error: dotDot takes vector, sparse , vector as args'
def dot(a, b):
" vector-matrix, matrix-vector or matrix-matrix product "
if isSparse(a) and vector.isVector(b):
new = vector.zeros(a.size()[0])
for ij in a.keys():
new[ij[0]] += a[ij]* b[ij[1]]
return new
elif vector.isVector(a) and isSparse(b):
new = vector.zeros(b.size()[1])
for ij in b.keys():
new[ij[1]] += a[ij[0]]* b[ij]
return new
elif isSparse(a) and isSparse(b):
if a.size()[1] != b.size()[0]:
print '**Warning shapes do not match in dot(sparse, sparse)'
new = sparse({})
n = min([a.size()[1], b.size()[0]])
for i in range(a.size()[0]):
for j in range(b.size()[1]):
sum = 0.
for k in range(n):
sum += a.get((i,k),0.)*b.get((k,j),0.)
if sum != 0.:
new[(i,j)] = sum
return new
else:
raise TypeError, 'in dot'
def diag(b):
# given a sparse matrix b return its diagonal
res = vector.zeros(b.size()[0])
for i in range(b.size()[0]):
res[i] = b.get((i,i), 0.)
return res
def identity(n):
if type(n) != types.IntType:
raise TypeError, ' in identity: # must be integer'
else:
new = sparse({})
for i in range(n):
new[(i,i)] = 1+0.
return new
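###############################################################################
# Sketch of the stripped `sparse` class (assumptions throughout): per the
# module docstring it inherits from dict, keyed by (i, j) index tuples. Only
# the behaviour exercised by the test code below is sketched; the original
# biCGsolve was a biconjugate-gradient solver, for which a plain
# conjugate-gradient stand-in is given.
class sparse(dict):
    "a sparse matrix: a dict keyed by (row, column) tuples"

    def __init__(self, data={}):
        dict.__init__(self, data)

    def size(self):
        if len(self) == 0:
            return (0, 0)
        rows = [ij[0] for ij in self.keys()]
        cols = [ij[1] for ij in self.keys()]
        return (max(rows) + 1, max(cols) + 1)

    def out(self):
        print self

    def __add__(self, other):
        new = sparse(self)
        for ij in other:
            new[ij] = new.get(ij, 0.) + other[ij]
        return new

    def __neg__(self):
        return sparse(dict([(ij, -self[ij]) for ij in self]))

    def __sub__(self, other):
        return self + (-other)

    def __mul__(self, other):
        if isSparse(other):
            # element-by-element product
            return sparse(dict([(ij, self[ij] * other[ij])
                                for ij in self if ij in other]))
        return sparse(dict([(ij, self[ij] * other) for ij in self]))

    __rmul__ = __mul__

    def __div__(self, other):
        return sparse(dict([(ij, self[ij] / other) for ij in self]))

    def plot(self):
        # crude text plot of the sparsity pattern
        nrow, ncol = self.size()
        for i in range(nrow):
            print ''.join([('X' if (i, j) in self else '.')
                           for j in range(ncol)])

    def biCGsolve(self, x0, b, tol, nmax):
        # stand-in solver (assumption): a plain conjugate-gradient loop
        x = x0
        r = b - dot(self, x)
        p = b - dot(self, x)
        for iteration in range(nmax):
            rr = sum([r[i] * r[i] for i in range(len(r))])
            if rr < tol * tol:
                break
            Ap = dot(self, p)
            alpha = rr / sum([p[i] * Ap[i] for i in range(len(p))])
            for i in range(len(x)):
                x[i] = x[i] + alpha * p[i]
                r[i] = r[i] - alpha * Ap[i]
            beta = sum([r[i] * r[i] for i in range(len(r))]) / rr
            for i in range(len(p)):
                p[i] = r[i] + beta * p[i]
        return x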
###############################################################################
if __name__ == "__main__":
print 'a = sparse()'
a = sparse()
print 'a.__doc__=',a.__doc__
print 'a[(0,0)] = 1.0'
a[(0,0)] = 1.0
a.out()
print 'a[(2,3)] = 3.0'
a[(2,3)] = 3.0
a.out()
print 'len(a)=',len(a)
print 'a.size()=', a.size()
b = sparse({(0,0):2.0, (0,1):1.0, (1,0):1.0, (1,1):2.0, (1,2):1.0, (2,1):1.0, (2,2):2.0})
print 'a=', a
print 'b=', b
b.out()
print 'a+b'
c = a + b
c.out()
print '-a'
c = -a
c.out()
a.out()
print 'a-b'
c = a - b
c.out()
print 'a*1.2'
c = a*1.2
c.out()
print '1.2*a'
c = 1.2*a
c.out()
print 'a=', a
print 'dot(a, b)'
print 'a.size()[1]=',a.size()[1],' b.size()[0]=', b.size()[0]
c = dot(a, b)
c.out()
print 'dot(b, a)'
print 'b.size()[1]=',b.size()[1],' a.size()[0]=', a.size()[0]
c = dot(b, a)
c.out()
try:
print 'dot(b, vector.vector([1,2,3]))'
c = dot(b, vector.vector([1,2,3]))
c.out()
print 'dot(vector.vector([1,2,3]), b)'
c = dot(vector.vector([1,2,3]), b)
c.out()
print 'b.size()=', b.size()
except: pass
print 'a*b -> element by element product'
c = a*b
c.out()
print 'b*a -> element by element product'
c = b*a
c.out()
print 'a/1.2'
c = a/1.2
c.out()
print 'c = identity(4)'
c = identity(4)
c.out()
print 'c = transp(a)'
c = transp(a)
c.out()
b[(2,2)]=-10.0
b[(2,0)]=+10.0
try:
import vector
print 'Check conjugate gradient solver'
s = vector.vector([1, 0, 0])
print 's'
s.out()
x0 = s
print 'x = b.biCGsolve(x0, s, 1.0e-10, len(b)+1)'
x = b.biCGsolve(x0, s, 1.0e-10, len(b)+1)
x.out()
print 'check validity of CG'
c = dot(b, x) - s
c.out()
except: pass
print 'plot b matrix'
b.out()
b.plot()
print 'del b[(2,2)]'
del b[(2,2)]
print 'del a'
del a
#a.out()
| 22.819477
| 90
| 0.565525
|
f5dedc85895871ad1a7086cfc4fa5d80500516b2
| 7,557
|
py
|
Python
|
bibref_parser/parser.py
|
glooney/python-bibref-parser
|
9ca6b99a917659425fe7b4759f523c78f0180124
|
[
"MIT"
] | null | null | null |
bibref_parser/parser.py
|
glooney/python-bibref-parser
|
9ca6b99a917659425fe7b4759f523c78f0180124
|
[
"MIT"
] | null | null | null |
bibref_parser/parser.py
|
glooney/python-bibref-parser
|
9ca6b99a917659425fe7b4759f523c78f0180124
|
[
"MIT"
] | null | null | null |
import re
| 34.040541
| 93
| 0.382162
|
f5e083f241a88c8c9d72629bf0fc59c5c51dd648
| 392
|
py
|
Python
|
FlaskApp/sql_connection.py
|
pjneelam/pjneelam.eportfolio2022
|
3f55c1da6214e3eabab949ff83b34c0553c52866
|
[
"CC-BY-3.0"
] | null | null | null |
FlaskApp/sql_connection.py
|
pjneelam/pjneelam.eportfolio2022
|
3f55c1da6214e3eabab949ff83b34c0553c52866
|
[
"CC-BY-3.0"
] | null | null | null |
FlaskApp/sql_connection.py
|
pjneelam/pjneelam.eportfolio2022
|
3f55c1da6214e3eabab949ff83b34c0553c52866
|
[
"CC-BY-3.0"
] | null | null | null |
#https://www.youtube.com/watch?v=f9PR1qcwOyg
#create global connection (shared across calls, initialised lazily)
import mysql.connector
__cnx=None
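# Sketch of the stripped accessor (assumption, following the tutorial linked
# above): cache a single MySQL connection in the module-global __cnx.
def get_sql_connection():
    global __cnx
    if __cnx is None:
        # hypothetical credentials and database name
        __cnx = mysql.connector.connect(user='root', password='root',
                                        host='localhost', database='fm')
    return __cnx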
| 30.153846
| 74
| 0.584184
|
f5e2b3958e10bba2c1126d9063cd6d9ca99a6bc2
| 1,217
|
py
|
Python
|
kernellib/utils/visualization.py
|
jejjohnson/kernellib
|
eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050
|
[
"MIT"
] | 1
|
2021-02-04T08:52:04.000Z
|
2021-02-04T08:52:04.000Z
|
kernellib/utils/visualization.py
|
jejjohnson/kernellib
|
eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050
|
[
"MIT"
] | null | null | null |
kernellib/utils/visualization.py
|
jejjohnson/kernellib
|
eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050
|
[
"MIT"
] | 1
|
2018-04-17T06:42:09.000Z
|
2018-04-17T06:42:09.000Z
|
import matplotlib.pyplot as plt
| 25.354167
| 97
| 0.612161
|
f5e3743f51af18cff1772397d3d93a0c7e89bca0
| 2,780
|
py
|
Python
|
edit/editseries.py
|
lokal-profil/isfdb_site
|
0ce20d6347849926d4eda961ea9249c31519eea5
|
[
"BSD-3-Clause"
] | null | null | null |
edit/editseries.py
|
lokal-profil/isfdb_site
|
0ce20d6347849926d4eda961ea9249c31519eea5
|
[
"BSD-3-Clause"
] | null | null | null |
edit/editseries.py
|
lokal-profil/isfdb_site
|
0ce20d6347849926d4eda961ea9249c31519eea5
|
[
"BSD-3-Clause"
] | null | null | null |
#!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 Al von Ruff, Bill Longley and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdb import *
from isfdblib import *
from isfdblib_help import *
from isfdblib_print import *
from library import *
from SQLparsing import *
if __name__ == '__main__':
series_number = SESSION.Parameter(0, 'int')
series = SQLget1Series(series_number)
if not series:
SESSION.DisplayError('Record Does Not Exist')
PrintPreSearch('Series Editor')
PrintNavBar('edit/editseries.cgi', series_number)
help = HelpSeries()
printHelpBox('series', 'SeriesData')
print "Note:"
print "<ul>"
print "<li>Changing the Name field changes the name of the series for all books currently in this series."
print "<li>Changing the Parent field does NOT change the name of the parent series."
print "<li>If the Parent exists, changing the Parent field relinks the Named series to that parent."
print "<li>If the Parent does not exist, a new Parent series will be created and the Named series will be linked to that parent."
print "</ul>"
print "<hr>"
print "<p>"
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/edit/submitseries.cgi">'
print '<table border="0">'
print '<tbody id="tagBody">'
# Display the series name
printfield("Name", "series_name", help, series[SERIES_NAME])
trans_series_names = SQLloadTransSeriesNames(series[SERIES_PUBID])
printmultiple(trans_series_names, "Transliterated Name", "trans_series_names", help)
# Display the name of this series' parent (if one exists)
parent_series_name = ''
if series[SERIES_PARENT]:
parent_series = SQLget1Series(series[SERIES_PARENT])
parent_series_name = parent_series[SERIES_NAME]
printfield("Parent", "series_parent", help, parent_series_name)
# Display this series' ordering position within its superseries
printfield("Series Parent Position", "series_parentposition", help, series[SERIES_PARENT_POSITION])
webpages = SQLloadSeriesWebpages(series[SERIES_PUBID])
printWebPages(webpages, 'series', help)
printtextarea('Note', 'series_note', help, SQLgetNotes(series[SERIES_NOTE]))
printtextarea('Note to Moderator', 'mod_note', help, '')
print '</tbody>'
print '</table>'
print '<p>'
print '<hr>'
print '<p>'
print '<input NAME="series_id" VALUE="%d" TYPE="HIDDEN">' % series_number
print '<input TYPE="SUBMIT" VALUE="Submit Data" tabindex="1">'
print '</form>'
print '<p>'
print '<hr>'
PrintPostSearch(0, 0, 0, 0, 0, 0)
| 32.325581
| 130
| 0.685612
|
f5e3d0985186fbf72ce1898f6d250fd384de7e07
| 2,154
|
py
|
Python
|
sound.py
|
ITNano/soundserver
|
b84cbfd821987ad8af72a6c2677caa0b949abff6
|
[
"MIT"
] | null | null | null |
sound.py
|
ITNano/soundserver
|
b84cbfd821987ad8af72a6c2677caa0b949abff6
|
[
"MIT"
] | null | null | null |
sound.py
|
ITNano/soundserver
|
b84cbfd821987ad8af72a6c2677caa0b949abff6
|
[
"MIT"
] | null | null | null |
import pyaudio
import numpy as np
import mixer
| 32.149254
| 176
| 0.596565
|
f5e5cd56b7a8f566083c50626d4a1f1f2165bd63
| 2,284
|
py
|
Python
|
noxutils.py
|
sphinx-contrib/zopeext
|
b749d0023f4fb8b8eea3a8f3216f63397c6272de
|
[
"BSD-2-Clause"
] | 1
|
2020-03-16T07:20:58.000Z
|
2020-03-16T07:20:58.000Z
|
noxutils.py
|
sphinx-contrib/zopeext
|
b749d0023f4fb8b8eea3a8f3216f63397c6272de
|
[
"BSD-2-Clause"
] | 3
|
2021-12-19T09:39:45.000Z
|
2022-01-06T05:05:03.000Z
|
noxutils.py
|
sphinx-contrib/zopeext
|
b749d0023f4fb8b8eea3a8f3216f63397c6272de
|
[
"BSD-2-Clause"
] | null | null | null |
"""
From https://github.com/brechtm/rinohtype/blob/master/noxutil.py
https://github.com/cjolowicz/nox-poetry/discussions/289
"""
import json
from collections.abc import Iterable
from pathlib import Path
from typing import Optional
from urllib.request import urlopen, Request
from poetry.core.factory import Factory
from poetry.core.semver import parse_single_constraint as parse_version
VERSION_PARTS = ("major", "minor", "patch")
def get_versions(
dependency: str,
granularity: str = "minor",
# ascending: bool = False, limit: Optional[int] = None,
# allow_prerelease: bool = False,
) -> Iterable[str]:
"""Yield all versions of `dependency` considering version constraints
Args:
dependency: the name of the dependency
granularity: yield only the newest patch version of each major/minor
release
ascending: count backwards from latest version, by default (not much
use without the 'limit' arg)
limit: maximum number of entries to return
allow_prerelease: whether to include pre-release versions
Yields:
All versions of `dependency` that match the version constraints defined
and in this project's pyproject.toml and the given `granularity`.
"""
package = Factory().create_poetry(Path(__file__).parent).package
for requirement in package.requires:
if requirement.name == dependency:
break
else:
raise ValueError(f"{package.name} has no dependency '{dependency}'")
filtered_versions = [
version
for version in all_versions(dependency)
if requirement.constraint.allows(version)
]
parts = VERSION_PARTS[: VERSION_PARTS.index(granularity) + 1]
result = {}
for version in filtered_versions:
key = tuple(getattr(version, part) for part in parts)
result[key] = max((result[key], version)) if key in result else version
return [str(version) for version in result.values()]
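def all_versions(dependency: str) -> Iterable:
    """Sketch of the stripped helper (an assumption, but it is what the
    json/urlopen/parse_version imports above point to): query the PyPI JSON
    API for every released version of `dependency`."""
    request = Request("https://pypi.org/pypi/{}/json".format(dependency),
                      headers={"Accept": "application/json"})
    with urlopen(request) as response:
        releases = json.load(response)["releases"]
    return [parse_version(version) for version in releases]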
| 35.138462
| 79
| 0.700088
|
f5e6032fc8e0c3163e2cd3542bdd970f3cb1268b
| 423
|
py
|
Python
|
tbutton_maker/admin.py
|
codefisher/tbutton_web
|
357bddc26b42c8511e7b5ce087bb0ac115f97e4c
|
[
"MIT"
] | null | null | null |
tbutton_maker/admin.py
|
codefisher/tbutton_web
|
357bddc26b42c8511e7b5ce087bb0ac115f97e4c
|
[
"MIT"
] | null | null | null |
tbutton_maker/admin.py
|
codefisher/tbutton_web
|
357bddc26b42c8511e7b5ce087bb0ac115f97e4c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from tbutton_web.tbutton_maker.models import Application, Button, DownloadSession, UpdateSession
admin.site.register(DownloadSession, DownloadSessionAdmin)
admin.site.register(UpdateSession, UpdateSessionAdmin)
| 42.3
| 96
| 0.820331
|
f5e6080e840c71c64f246a6744ac59598bb42ed0
| 1,359
|
py
|
Python
|
abi_recursion.py
|
Abirami33/python-75-hackathon
|
c15505615d92cf304c27eabd3136406b08c59078
|
[
"MIT"
] | null | null | null |
abi_recursion.py
|
Abirami33/python-75-hackathon
|
c15505615d92cf304c27eabd3136406b08c59078
|
[
"MIT"
] | null | null | null |
abi_recursion.py
|
Abirami33/python-75-hackathon
|
c15505615d92cf304c27eabd3136406b08c59078
|
[
"MIT"
] | null | null | null |
#PASCALS TRIANGLE USING RECURSION
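# Sketch of the stripped function (assumption): a recursive construction that
# reproduces the sample output quoted at the bottom of this file.
def pascal(n):
    if n == 1:
        return [[1]]
    rows = pascal(n - 1)
    prev = rows[-1]
    rows.append([1] + [prev[i] + prev[i + 1] for i in range(len(prev) - 1)] + [1])
    return rows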
if __name__ == "__main__":
print("Enter the number of rows:")
n=int(input()) #getting user input
print(pascal(n)) #call the pascal triangle function
''' OUTPUT:Enter the number of rows:5
[[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]] '''
| 48.535714
| 106
| 0.40103
|
f5e6d7bb0bd30f9540f1c0b749f54516092b6ca3
| 3,806
|
py
|
Python
|
nodes/centered_mocap_and_tag_rebroadcaster.py
|
rislab/apriltag_tracker
|
41c4deb4b5bcd94e5f666f3d4b1f1d141c705582
|
[
"BSD-3-Clause"
] | null | null | null |
nodes/centered_mocap_and_tag_rebroadcaster.py
|
rislab/apriltag_tracker
|
41c4deb4b5bcd94e5f666f3d4b1f1d141c705582
|
[
"BSD-3-Clause"
] | null | null | null |
nodes/centered_mocap_and_tag_rebroadcaster.py
|
rislab/apriltag_tracker
|
41c4deb4b5bcd94e5f666f3d4b1f1d141c705582
|
[
"BSD-3-Clause"
] | 1
|
2019-02-18T00:40:20.000Z
|
2019-02-18T00:40:20.000Z
|
#!/usr/bin/env python2.7
from __future__ import division
import roslib
import rospy
import tf
from nav_msgs.msg import Odometry
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
import numpy as np
import pdb
from message_filters import Subscriber, ApproximateTimeSynchronizer
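class GT_cleaner(object):
    # Sketch of the stripped class (assumptions throughout): re-centre the
    # incoming odometry on the first received pose and rebroadcast it. The
    # topic names are hypothetical, and the original likely synchronised the
    # mocap and tag streams with the imported ApproximateTimeSynchronizer.
    def __init__(self):
        self.origin = None
        self.pub = rospy.Publisher('centered_odom', Odometry, queue_size=10)
        rospy.Subscriber('odom', Odometry, self.callback)

    def callback(self, msg):
        if self.origin is None:
            p = msg.pose.pose.position
            self.origin = (p.x, p.y, p.z)
        msg.pose.pose.position.x -= self.origin[0]
        msg.pose.pose.position.y -= self.origin[1]
        msg.pose.pose.position.z -= self.origin[2]
        self.pub.publish(msg)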
if __name__ == '__main__':
rospy.init_node('gt_cleaner', anonymous=True)
cleaner_obj = GT_cleaner()
rospy.spin()
| 37.313725
| 142
| 0.59196
|
f5e74389c152886253bc86c73ff3f6d23bab1e6e
| 3,266
|
py
|
Python
|
garage.py
|
DidymusRex/garage-pi
|
4f4dcc0251f8cb5f5150ddaff7dac01a64eac948
|
[
"CC0-1.0"
] | null | null | null |
garage.py
|
DidymusRex/garage-pi
|
4f4dcc0251f8cb5f5150ddaff7dac01a64eac948
|
[
"CC0-1.0"
] | null | null | null |
garage.py
|
DidymusRex/garage-pi
|
4f4dcc0251f8cb5f5150ddaff7dac01a64eac948
|
[
"CC0-1.0"
] | null | null | null |
from datetime import datetime
from gpiozero import DistanceSensor
from garage_door import garage_door
from garage_camera import garage_camera
from MQTT_Config import *  # provides mqtt_broker, mqtt_account, mqtt_passwd used below
import paho.mqtt.client as mqtt
from temp_sensor import temp_sensor
from time import sleep
"""
GPIO pin assignments:
relays
range finder sensor (echo passes thru voltage converter)
DHT11 temperature/humidity sensor
"""
GPIO_Pins = {'temp_1':21,
'relay_1':6,
'relay_2':12,
'trig_1':17,
'echo_1':18,
'trig_2':22,
'echo_2':23}
"""
MQTT connect callback
Subscribing in on_connect() means that if we lose the connection and
reconnect then subscriptions will be renewed.
"""
"""
MQTT receive message callback (garage/command)
Take action on a subject
"""
"""
MQTT publish callback
Mainly for debugging
"""
"""
Just in case
"""
"""
Create client and connect it to the MQTT broker
"""
mqc = mqtt.Client("garage-pi", clean_session=True)
mqc.on_connect = on_connect
mqc.on_message = on_message
mqc.on_publish = on_publish
mqc.username_pw_set(mqtt_account, mqtt_passwd)
mqc.connect(mqtt_broker)
mqc.loop_start()
mqc.publish("garage/foo", "go!")
"""
Create temperature sensor object
"""
dht11 = temp_sensor(mqc, GPIO_Pins['temp_1'])
"""
Create garage camera object
"""
garage_cam = garage_camera(mqc)
"""
Create garage door objects
"""
garage_doors = dict()
garage_doors["left"] = garage_door(mqc,
"left",
GPIO_Pins['relay_1'],
GPIO_Pins['echo_1'],
GPIO_Pins['trig_1'])
garage_doors["right"] = garage_door(mqc,
"right",
GPIO_Pins['relay_2'],
GPIO_Pins['echo_2'],
GPIO_Pins['trig_2'])
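def main():
    # Sketch of the stripped entry point (assumption): the gpiozero callbacks
    # and the MQTT network loop do the work, so just keep the process alive.
    try:
        while True:
            sleep(1)
    except KeyboardInterrupt:
        mqc.loop_stop()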
if __name__ == "__main__":
main()
| 26.33871
| 72
| 0.580527
|
f5e7ef3d480cf9bb53271fcd48200dc95c179ef9
| 5,887
|
py
|
Python
|
app.py
|
leemengtaiwan/gist-evernote
|
90d8573870ded37dc82575ba25968d7a06efe219
|
[
"MIT"
] | 35
|
2018-01-29T00:50:36.000Z
|
2021-04-04T13:59:26.000Z
|
app.py
|
leemengtaiwan/gist-evernote
|
90d8573870ded37dc82575ba25968d7a06efe219
|
[
"MIT"
] | 5
|
2021-02-08T20:18:24.000Z
|
2022-03-11T23:15:12.000Z
|
app.py
|
leemengtaiwan/gist-evernote
|
90d8573870ded37dc82575ba25968d7a06efe219
|
[
"MIT"
] | 4
|
2018-02-06T12:13:09.000Z
|
2019-12-20T09:12:41.000Z
|
# encoding: utf-8
import os
import time
from multiprocessing import Pool, cpu_count
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from enote.util import get_note, get_notebook, get_notebooks, \
create_resource, create_note, create_notebook, update_note
from github.util import get_user_name, get_all_gists
from web.util import fullpage_screenshot, get_gist_hash, create_chrome_driver
from settings import NOTEBOOK_TO_SYNC
from db import get_db
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
GIST_BASE_URL = 'https://gist.github.com'
notebook = None
github_user = get_user_name() # get current login github user for fetching gist content
db = get_db() # database to store synchronization info
def sync_gist(gist, driver):
"""Sync the Github gist to the corresponding Evernote note.
Create a new Evernote note if there is no corresponding one with the gist.
Overwrite existing note's content if gist has been changed.
Parameters
----------
gist : dict
A Gist acquired by Github GraphQL API with format like:
{
'id': 'gist_id',
'name': 'gist_name',
'description': 'description',
'pushAt': '2018-01-15T00:48:23Z'
}
driver : selenium.webdriver
The web driver used to access gist url
Returns
-------
note : evernote.edam.type.ttpyes.Note
None if no new note created or updated
"""
note_exist = False
gist_url = '/'.join((GIST_BASE_URL, gist['name']))
# check existing gist hash before fetch if available
prev_hash = db.get_hash_by_id(gist['id'])
note_guid = db.get_note_guid_by_id(gist['id'])
if prev_hash and note_guid:
note_exist = True
cur_hash = get_gist_hash(github_user, gist['name'])
if prev_hash == cur_hash:
print('Gist {} remain the same, ignore.'.format(gist_url))
db.update_gist(gist, note_guid, cur_hash)
return None
driver.get(gist_url)
# wait at most x seconds for Github rendering gist context
delay_seconds = 10
try:
WebDriverWait(driver, delay_seconds).until(EC.presence_of_element_located((By.CLASS_NAME, 'is-render-ready')))
except TimeoutException:
print("Take longer than {} seconds to load page.".format(delay_seconds))
# get first file name as default note title
gist_title = driver.find_element(By.CLASS_NAME, 'gist-header-title>a').text
# take screen shot for the gist and save it temporally
image_path = 'images/{}.png'.format(gist['name'])
fullpage_screenshot(driver, image_path)
# build skeleton for note (including screenshot)
resource, _ = create_resource(image_path)
note_title = gist['description'] if gist['description'] else gist_title
note_body = format_note_body(gist)
# get hash of raw gist content and save gist info to database
gist_hash = get_gist_hash(github_user, gist['name'])
# create new note / update existing note
if not note_exist:
note = create_note(note_title, note_body, [resource], parent_notebook=notebook)
db.save_gist(gist, note.guid, gist_hash)
else:
note = get_note(note_guid)
update_note(note, note_title, note_body, note_guid, [resource])
db.update_gist(gist, note_guid, gist_hash)
os.remove(image_path)
print("Finish creating note for gist {}".format(gist_url))
return note
def format_note_body(gist):
"""Create the note content that will be shown before attachments.
Parameters
----------
gist : dict
Dict that contains all information of the gist
Returns
-------
note_body : str
"""
blocks = []
desc = gist['description']
if desc:
blocks.append(desc)
gist_url = '/'.join((GIST_BASE_URL, gist['name']))
blocks.append('<a href="{}">Gist on Github</a>'.format(gist_url))
note_body = '<br/>'.join(blocks)
return note_body
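def app():
    """Sketch of the stripped entry point (assumptions: the notebook lookup
    and the driver lifecycle; sync_gist and the imports are from this file).
    The original may have parallelised the loop with the imported Pool."""
    global notebook
    notebook = get_notebook(NOTEBOOK_TO_SYNC)
    if notebook is None:
        notebook = create_notebook(NOTEBOOK_TO_SYNC)
    driver = create_chrome_driver()
    try:
        for gist in get_all_gists():
            sync_gist(gist, driver)
    finally:
        driver.quit()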
if __name__ == '__main__':
app()
| 31.821622
| 118
| 0.674367
|
f5e7fdab1587e4d6e66ab3defb25c9ecd73fb773
| 20
|
py
|
Python
|
hello-fortran-dependency/hello/__init__.py
|
Nicholaswogan/skbuild-f2py-examples
|
e47d0a9ce483e54b678e31789dbfcc90ff4a8e74
|
[
"MIT"
] | 4
|
2021-07-28T02:16:52.000Z
|
2021-12-23T00:20:21.000Z
|
hello-fortran-dependency/hello/__init__.py
|
Nicholaswogan/skbuild-f2py-examples
|
e47d0a9ce483e54b678e31789dbfcc90ff4a8e74
|
[
"MIT"
] | 1
|
2021-09-14T21:17:49.000Z
|
2021-09-14T23:17:47.000Z
|
hello-fortran-dependency/hello/__init__.py
|
Nicholaswogan/skbuild-f2py-examples
|
e47d0a9ce483e54b678e31789dbfcc90ff4a8e74
|
[
"MIT"
] | null | null | null |
from .hola import *
| 10
| 19
| 0.7
|
f5e81680dbe98070292ce77eaa7479aa8b7e1630
| 326
|
py
|
Python
|
python-leetcode/350.py
|
MDGSF/interviews
|
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
|
[
"MIT"
] | 12
|
2020-01-16T08:55:27.000Z
|
2021-12-02T14:52:39.000Z
|
python-leetcode/350.py
|
MDGSF/interviews
|
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
|
[
"MIT"
] | null | null | null |
python-leetcode/350.py
|
MDGSF/interviews
|
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
|
[
"MIT"
] | 1
|
2019-12-11T12:00:38.000Z
|
2019-12-11T12:00:38.000Z
|
import collections
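# Sketch of the stripped solution (LeetCode 350, "Intersection of Two Arrays
# II"): a Counter-based approach matching the collections import above.
class Solution(object):
    def intersect(self, nums1, nums2):
        counts = collections.Counter(nums1)
        result = []
        for n in nums2:
            if counts[n] > 0:
                counts[n] -= 1
                result.append(n)
        return result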
| 21.733333
| 71
| 0.546012
|
f5e9dfce4e604e5d08d5833b9e96482b6754ad47
| 217
|
py
|
Python
|
finally.py
|
rkjin/algorithm
|
5661dd621a43bcbb37b4113fd0918854e7a24310
|
[
"Apache-2.0"
] | null | null | null |
finally.py
|
rkjin/algorithm
|
5661dd621a43bcbb37b4113fd0918854e7a24310
|
[
"Apache-2.0"
] | null | null | null |
finally.py
|
rkjin/algorithm
|
5661dd621a43bcbb37b4113fd0918854e7a24310
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
if __name__ == '__main__':
try:
f = open('test_file.txt', 'w')
f.write('this is exception finally')
except Exception as e:
pass
finally:
        f.close()  # close() was missing its parentheses, so the file was never closed
pass
| 15.5
| 44
| 0.557604
|
f5ea24e7021ff1af76d60fe6869f59dd63386b1e
| 198
|
py
|
Python
|
autokey/data/Emacs/c_g.py
|
Curiosidad-Racional/.config
|
af5a8901510e4b87dff1be024d3d29987c148f3f
|
[
"MIT"
] | 2
|
2021-05-29T18:11:26.000Z
|
2021-10-21T20:53:16.000Z
|
autokey/data/Emacs/c_g.py
|
Curiosidad-Racional/.config
|
af5a8901510e4b87dff1be024d3d29987c148f3f
|
[
"MIT"
] | null | null | null |
autokey/data/Emacs/c_g.py
|
Curiosidad-Racional/.config
|
af5a8901510e4b87dff1be024d3d29987c148f3f
|
[
"MIT"
] | null | null | null |
import os
store.set_global_value("ctrl-space", False)
with open(os.path.expanduser("~/.config/polybar/keys.fifo"), "wb") as f:
f.write(b"TITLE:\n")
store.set_global_value("emacs-chain-keys", [])
| 39.6
| 72
| 0.712121
|
f5eaea013c4c8e9169d5648e9946cf1e2ab0fb60
| 520
|
py
|
Python
|
lupin/fields/__init__.py
|
Clustaar/lupin
|
9ef73642d84a99adb80abf5a922a9422ddae9254
|
[
"MIT"
] | 22
|
2017-10-18T08:27:20.000Z
|
2022-03-25T18:53:43.000Z
|
lupin/fields/__init__.py
|
Clustaar/lupin
|
9ef73642d84a99adb80abf5a922a9422ddae9254
|
[
"MIT"
] | 5
|
2019-09-16T15:31:55.000Z
|
2022-02-10T08:29:14.000Z
|
lupin/fields/__init__.py
|
Clustaar/lupin
|
9ef73642d84a99adb80abf5a922a9422ddae9254
|
[
"MIT"
] | null | null | null |
from .field import Field # NOQA
from .datetime_field import DateTime # NOQA
from .date import Date # NOQA
from .string import String # NOQA
from .object import Object # NOQA
from .list import List # NOQA
from .polymorphic_object import PolymorphicObject # NOQA
from .polymorphic_list import PolymorphicList # NOQA
from .constant import Constant # NOQA
from .int import Int # NOQA
from .float import Float # NOQA
from .number import Number # NOQA
from .bool import Bool # NOQA
from .dict import Dict # NOQA
| 34.666667
| 57
| 0.757692
|
f5edd88e2d458d89d6714005f92ae5a2d900050e
| 564
|
py
|
Python
|
polls/urls.py
|
SkyFlame00/webpolls
|
d137da1aaaa8af78520af7762b8002428842d617
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
SkyFlame00/webpolls
|
d137da1aaaa8af78520af7762b8002428842d617
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
SkyFlame00/webpolls
|
d137da1aaaa8af78520af7762b8002428842d617
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('logout/', views.logoutView, name='logout'),
path('signup/', views.signup, name='signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
path('myprofile/', views.myprofile, name='myprofile'),
path('myprofile/edit/', views.myprofile_edit, name='myprofile_edit'),
path('testing', views.testing, name='testing')
]
| 37.6
| 132
| 0.654255
|
f5ee0fc5d74aae0b09b30c0e37603f02a2ea4deb
| 14,918
|
py
|
Python
|
forceDAQ/gui/plotter.py
|
gftabor/pyForceDAQ
|
3eababb41d855b961d228d8366fdd154bb6314ea
|
[
"MIT"
] | null | null | null |
forceDAQ/gui/plotter.py
|
gftabor/pyForceDAQ
|
3eababb41d855b961d228d8366fdd154bb6314ea
|
[
"MIT"
] | null | null | null |
forceDAQ/gui/plotter.py
|
gftabor/pyForceDAQ
|
3eababb41d855b961d228d8366fdd154bb6314ea
|
[
"MIT"
] | null | null | null |
__version__ = "0.2"
import threading
import numpy as np
import pygame
from expyriment.stimuli import Canvas, Rectangle, TextLine
from expyriment.stimuli._visual import Visual
from expyriment.misc import constants
lock_expyriment = threading.Lock()
Numpy_array_type = type(np.array([]))
class Scaling(object):
    # The class statement was lost in extraction; these two methods plainly
    # belong to the scaling object passed into level_indicator() below.

    def data2pixel(self, values):
        """values: numeric or numpy array"""
        return (values - self._zero_shift) * \
            (self.pixel_max - self.pixel_min) / self._range  # pixel_factor

    def trim(self, value):
        """trims value to the range, i.e. sets it to min or max if < min or > max"""
        if value < self.min:
            return self.min
        elif value > self.max:
            return self.max
        return value
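    # Hypothetical constructor sketch (the original __init__ was stripped):
    # map a data range [min, max] onto a pixel range [pixel_min, pixel_max].
    def __init__(self, min, max, pixel_min, pixel_max):
        self.min, self.max = min, max
        self.pixel_min, self.pixel_max = pixel_min, pixel_max
        self._range = float(max - min)
        self._zero_shift = min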
class PGSurface(Canvas):
"""PyGame Surface: Expyriment Stimulus for direct Pygame operations and
PixelArrays
In contrast to other Expyriment stimuli the class does not generate temporary
surfaces.
"""
def unlock_pixel_array(self):
"""todo"""
self._px_array = None
def preload(self, inhibit_ogl_compress=False):
self.unlock_pixel_array()
return Canvas.preload(self, inhibit_ogl_compress)
def compress(self):
self.unlock_pixel_array()
return Canvas.compress(self)
def decompress(self):
self.unlock_pixel_array()
return Canvas.decompress(self)
def plot(self, stimulus):
self.unlock_pixel_array()
return Canvas.plot(self, stimulus)
def clear_surface(self):
self.unlock_pixel_array()
return Canvas.clear_surface(self)
def copy(self):
self.unlock_pixel_array()
return Canvas.copy(self)
def unload(self, keep_surface=False):
if not keep_surface:
self.unlock_pixel_array()
return Canvas.unload(self, keep_surface)
def rotate(self, degree):
self.unlock_pixel_array()
return Canvas.rotate(self, degree)
def scale(self, factors):
self.unlock_pixel_array()
return Canvas.scale(self, factors)
# expyriment 0.8.0
# def scale_to_fullscreen(self, keep_aspect_ratio=True):
# self.unlock_pixel_array()
# return Canvas.scale_to_fullscreen(self, keep_aspect_ratio)
class Plotter(PGSurface):
"""Pygame Plotter"""
def clear_area(self):
self.pixel_array[:, :] = self._background_colour
def set_horizontal_line(self, y_values):
"""y_values: array"""
try:
self._horizontal_lines = np.array(y_values, dtype=int)
except:
self._horizontal_lines = None
def write_values(self, position, values, set_marker=False,
set_point_marker=False):
"""
additional points: np.array
"""
if set_marker:
self.pixel_array[position, :] = self.marker_colour
else:
self.pixel_array[position, :] = self._background_colour
if set_point_marker:
self.pixel_array[position, 0:2] = self.marker_colour
if self._horizontal_lines is not None:
for c in (self._y_range[1] - self._horizontal_lines):
self.pixel_array[:, c:c+1] = self.marker_colour
for c, plot_value in enumerate(self._y_range[1] - \
np.array(values, dtype=int)):
if plot_value >= 0 and self._previous[c] >= 0 \
and plot_value <= self._height and \
self._previous[c] <= self._height:
if self._previous[c] > plot_value:
self.pixel_array[position,
plot_value:self._previous[c] + 1] = \
self._data_row_colours[c]
else:
self.pixel_array[position,
self._previous[c]:plot_value + 1] = \
self._data_row_colours[c]
self._previous[c] = plot_value
def add_values(self, values, set_marker=False):
""" high level function of write values with type check and shifting to left
not used by plotter thread
"""
if type(values) is not Numpy_array_type and \
not isinstance(values, tuple) and \
not isinstance(values, list):
values = [values]
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data values does not match the ' +
'defined number of data rows!')
# move plot one pixel to the left
self.pixel_array[:-1, :] = self.pixel_array[1:, :]
self.write_values(position=-1, values=values, set_marker=set_marker)
def level_indicator(value, text, scaling, width=20,
text_size=14, text_gap=20, position=(0,0), thresholds = None,
colour=constants.C_EXPYRIMENT_ORANGE):
"""make an level indicator in for of an Expyriment stimulus
text_gap: gap between indicator and text
scaling: Scaling object
Returns
--------
expyriment.Canvas
"""
value = scaling.trim(value)
# indicator
height = scaling.pixel_max - scaling.pixel_min
indicator = Canvas(size=[width + 2, height + 2],
colour=(30, 30, 30))
zero = scaling.data2pixel(0)
px_bar_height = scaling.data2pixel(value) - zero
bar = Rectangle(size=(width, abs(px_bar_height)),
position=(0, zero + int((px_bar_height + 1) / 2)),
colour=colour)
bar.plot(indicator)
# levels & horizontal lines
try:
px_horizontal_lines = scaling.data2pixel(values=np.array(thresholds.thresholds))
except:
px_horizontal_lines = None
if px_horizontal_lines is not None:
for px in px_horizontal_lines:
level = Rectangle(size=(width+6, 2),
position=(0, px),
colour=constants.C_WHITE)
level.plot(indicator)
# text labels
txt = TextLine(text=text, text_size=text_size,
position=(0, -1 * (int(height / 2.0) + text_gap)),
text_colour=constants.C_YELLOW)
# make return canvas
w = max(txt.surface_size[0], indicator.size[0])
h = height + 2 * (txt.surface_size[1]) + text_gap
rtn = Canvas(size=(w, h), colour=(0, 0, 0), position=position)
indicator.plot(rtn)
txt.plot(rtn)
return rtn
if __name__ == "__main__":
pass
| 32.714912
| 88
| 0.58292
|
f5eeb057bded5c49089e78a2d6eb892367d91cd2
| 3,528
|
py
|
Python
|
gcp/extract/lib/weights_vcv.py
|
dylanhogan/prospectus-tools
|
662b2629290cd27c74cd34769773e0d6e73c7048
|
[
"MIT"
] | null | null | null |
gcp/extract/lib/weights_vcv.py
|
dylanhogan/prospectus-tools
|
662b2629290cd27c74cd34769773e0d6e73c7048
|
[
"MIT"
] | null | null | null |
gcp/extract/lib/weights_vcv.py
|
dylanhogan/prospectus-tools
|
662b2629290cd27c74cd34769773e0d6e73c7048
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, Distributed Meta-Analysis System
################################################################################
"""
This file provides methods for handling weighting across GCMs under
delta method calculations.
"""
__copyright__ = "Copyright 2014, Distributed Meta-Analysis System"
__author__ = "James Rising"
__credits__ = ["James Rising"]
__maintainer__ = "James Rising"
__email__ = "j.a.rising@lse.ac.uk"
__status__ = "Production"
__version__ = "$Revision$"
# $Source$
import numpy as np
from scipy.optimize import brentq
from scipy.stats import norm
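class WeightedGMCDF(object):
    """Sketch of the stripped class (assumption): the CDF of a weighted
    mixture of independent Gaussians, inverted numerically with brentq,
    which is what the imports above and the test code below call for."""

    def __init__(self, means, variances, weights):
        self.means = np.asarray(means)
        self.sds = np.sqrt(np.asarray(variances))
        self.weights = np.asarray(weights)

    def cdf(self, x):
        return np.sum(self.weights * norm.cdf(x, self.means, self.sds))

    def inverse(self, pp):
        # bracket each quantile well outside the mixture's support
        lo = float(np.min(self.means - 10 * self.sds))
        hi = float(np.max(self.means + 10 * self.sds))
        return [brentq(lambda x, p=p: self.cdf(x) - p, lo, hi) for p in pp]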
if __name__ == '__main__':
## Example between R and python
## R:
# means <- rnorm(10)
# sds <- rexp(10)
# weights <- runif(10)
# weights <- weights / sum(weights)
# draws <- sapply(1:100000, function(ii) sample(rnorm(10, means, sds), 1, prob=weights))
# pp <- runif(10)
# quantile(draws, pp)
## For the values below:
# > quantile(draws, pp)
# 4.261865% 57.54305% 9.961645% 13.1325% 68.3729% 89.93871% 37.68216% 25.06827% 72.6134% 92.35501%
# -2.70958468 0.77240194 -2.15403320 -1.90146370 1.17428553 1.95475922 -0.06482985 -0.92293638 1.36865349 2.00405179
## Python:
means = [-1.10402809, 1.91300947, -2.21007153, 0.65175650, 0.56314868, -0.28337581, 0.98788803, 1.10211432, -0.06220629, -1.45807086]
variances = np.array([0.65422226, 0.13413332, 0.61493262, 0.29639041, 2.20748648, 1.69513869, 1.15008972, 0.41550756, 0.03384455, 1.07446232])**2
weights = [0.07420341, 0.16907337, 0.11439943, 0.08439015, 0.01868190, 0.14571485, 0.07630478, 0.17063990, 0.09951820, 0.04707401]
pp = [0.04261865, 0.57543051, 0.09961645, 0.13132502, 0.68372897, 0.89938713, 0.37682157, 0.25068274, 0.72613404, 0.92355014]
dist = WeightedGMCDF(means, variances, weights)
print dist.inverse(pp)
# [-2.708582712985005, 0.7720415676939508, -2.152969315647189, -1.8999500392063315, 1.1698917665106159, 1.955783738182657, -0.0641650435162273, -0.9150700927430755, 1.3660161904436894, 2.004650382993468]
| 40.551724
| 207
| 0.614229
|
f5efba2cc27e11d0b24ffd544963fe1fe77b60d3
| 764
|
py
|
Python
|
ecojunk/users/api/v1/resources.py
|
PIN-UPV/EcoJunkWebServer
|
53a42687c303ffe345f59dc1f11fa41c3526f6d7
|
[
"MIT"
] | 1
|
2018-10-02T11:54:26.000Z
|
2018-10-02T11:54:26.000Z
|
ecojunk/users/api/v1/resources.py
|
PIN-UPV/EcoJunkWebServer
|
53a42687c303ffe345f59dc1f11fa41c3526f6d7
|
[
"MIT"
] | 8
|
2018-10-03T08:02:39.000Z
|
2018-11-21T07:42:26.000Z
|
ecojunk/users/api/v1/resources.py
|
PIN-UPV/EcoJunkWebServer
|
53a42687c303ffe345f59dc1f11fa41c3526f6d7
|
[
"MIT"
] | 1
|
2018-10-02T11:54:32.000Z
|
2018-10-02T11:54:32.000Z
|
from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.response import Response
from ecojunk.users.api.v1.serializers import UserSerializer
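class UserDetailResource(RetrieveUpdateAPIView):
    """Sketch of the stripped resource (assumption): a retrieve/update view
    for the authenticated user, which is what the imports suggest. The class
    name is hypothetical."""
    serializer_class = UserSerializer

    def get_object(self):
        return self.request.user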
| 31.833333
| 67
| 0.740838
|
f5f03ea17d8bc72c5ae1602cba0dbeef3ed61e6b
| 2,905
|
py
|
Python
|
app/modules/payments/resources.py
|
almlys/sample_paymentsapi
|
d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc
|
[
"MIT"
] | null | null | null |
app/modules/payments/resources.py
|
almlys/sample_paymentsapi
|
d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc
|
[
"MIT"
] | null | null | null |
app/modules/payments/resources.py
|
almlys/sample_paymentsapi
|
d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# pylint: disable=bad-continuation
"""
RESTful API Payments resources
--------------------------
"""
import logging
from flask_login import current_user
from flask_restplus_patched import Resource
from flask_restplus._http import HTTPStatus
from app.extensions import db
from app.extensions.api import Namespace, abort
from app.extensions.api.parameters import PaginationParameters
from . import parameters, schemas
from .models import Payment
log = logging.getLogger(__name__) # pylint: disable=invalid-name
api = Namespace('payments', description="Payments") # pylint: disable=invalid-name
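@api.route('/')
class Payments(Resource):
    """Sketch of the stripped resource (assumptions: the route, the class
    name and the schema name `BasePaymentSchema`; the decorators follow the
    flask-restplus-patched conventions this module imports)."""

    @api.parameters(PaginationParameters())
    @api.response(schemas.BasePaymentSchema(many=True))
    def get(self, args):
        """List payments, paginated."""
        return Payment.query.offset(args['offset']).limit(args['limit'])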
| 27.666667
| 85
| 0.640275
|
f5f344323771b9cf37b06554ddc6a58b22178367
| 1,616
|
py
|
Python
|
bin/list-teams.py
|
kws/python-msgraphy
|
a5dad8bd834c476974fae151f30865c229e0f798
|
[
"MIT"
] | 1
|
2022-01-06T08:06:47.000Z
|
2022-01-06T08:06:47.000Z
|
bin/list-teams.py
|
kws/python-msgraphy
|
a5dad8bd834c476974fae151f30865c229e0f798
|
[
"MIT"
] | null | null | null |
bin/list-teams.py
|
kws/python-msgraphy
|
a5dad8bd834c476974fae151f30865c229e0f798
|
[
"MIT"
] | null | null | null |
import msgraphy_util
import argparse
from msgraphy import GraphApi
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='List or search for MS team'
)
parser.add_argument("name", type=str, nargs="?", help="show only teams which contains [name]")
parser.add_argument("--starts_with", "-s", type=str, nargs="?", metavar="value", help="only teams starting with [value]")
parser.add_argument("--exact", "-e", type=str, nargs="?", metavar="value", help="only teams exactly matching [value]")
parser.add_argument("--channels", "-c", action='store_true', help="include channels")
parser.add_argument("--folder", "-f", action='store_true', help="include channel folder (implies -c)")
args = parser.parse_args()
main(**vars(args))
| 41.435897
| 125
| 0.603342
|
f5f35c0e3a98205f6d6bd8dde9d15ab552f7d436
| 21,372
|
py
|
Python
|
tileEditor.py
|
haywireSSC/Level-Editor
|
34fedbe36b90afeb8c0d995fcecbed845ffd6253
|
[
"CC0-1.0"
] | null | null | null |
tileEditor.py
|
haywireSSC/Level-Editor
|
34fedbe36b90afeb8c0d995fcecbed845ffd6253
|
[
"CC0-1.0"
] | null | null | null |
tileEditor.py
|
haywireSSC/Level-Editor
|
34fedbe36b90afeb8c0d995fcecbed845ffd6253
|
[
"CC0-1.0"
] | null | null | null |
import pygame as p
from math import floor
from copy import deepcopy
try:
    import Tkinter, tkFileDialog                  # Python 2 names
except ImportError:
    import tkinter as Tkinter                     # Python 3 (pygame 2's MOUSEWHEEL below implies it)
    import tkinter.filedialog as tkFileDialog
root = Tkinter.Tk()
root.withdraw()
p.init()
running = True
tileWidth = 16
tileHeight = 16
mapWidth = 100
mapHeight = 100
camX = 0
camY = 0
scale = 2
uiScale = 2
hand = 1
layerStack = True
file_path = ''
file_path = tkFileDialog.askopenfilename()
if file_path[-3:] != 'png':
exit()
layers = []
currentLayer = 1
layers.append([-1] * (mapWidth * mapHeight))
layers.append([-1] * (mapWidth * mapHeight))
prevLayers = deepcopy(layers)
prevLayerLists = []
prevLayerListsRedo = []
brush = p.image.load('brush.png')
brushHover = p.image.load('brushHover.png')
square = p.image.load('square.png')
squareHover = p.image.load('squareHover.png')
brushRect = brush.get_rect()
squareRect = square.get_rect()
brushRect.width, brushRect.height = brushRect.width * uiScale, brushRect.height * uiScale
squareRect.width, squareRect.height = squareRect.width * uiScale, squareRect.height * uiScale
(width, height) = (480, 360)
p.display.set_caption('Tile Editor')
font = p.font.Font('Minecraftia-Regular.ttf', 8)
s = p.display.set_mode((width, height), p.RESIZABLE)
clock = p.time.Clock()
middleClick = False
leftClick = False
leftClickPrev = False
rightClick = False
rightClickDown = False
rightClickPrev = False
mouseOffset = (0, 0)
mousePos = (0, 0)
buttonClick = False
buttonHover = False
sDown = False
squareT = False
sDownStart = False
startPos = (0,0)
tiles = []
sheetHeight = 0
sheetWidth = 0
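# --- Sketches of the stripped helpers (assumptions, not the original code) ---
def load_sheet(path):
    # Slice the chosen tilesheet into tileWidth x tileHeight tiles; each entry
    # keeps the subsurface plus its (x, y) offset inside the sheet.
    global tiles, sheetWidth, sheetHeight
    sheet = p.image.load(path)
    sheetWidth, sheetHeight = sheet.get_size()
    tiles = []
    for ty in range(0, sheetHeight, tileHeight):
        for tx in range(0, sheetWidth, tileWidth):
            tiles.append([sheet.subsurface(p.Rect(tx, ty, tileWidth, tileHeight)), tx, ty])

def drawBox(w, h, filled):
    # Plain panel surface drawn behind the tile palette and layer buttons
    # (the `filled` flag is ignored in this sketch).
    box = p.Surface((w * uiScale, h * uiScale))
    box.fill((30, 30, 30))
    return box

def drawButton(label, x, y):
    # Draw a labelled button and refresh the buttonClick/buttonHover globals
    # that the main loop checks right after each call.
    global buttonClick, buttonHover
    text = font.render(label, False, (255, 255, 255))
    rect = p.Rect(x, y, text.get_width() + 4 * uiScale, 10 * uiScale)
    buttonHover = rect.collidepoint(mousePos)
    buttonClick = buttonHover and leftClick and not leftClickPrev
    p.draw.rect(s, (86, 92, 86) if buttonHover else (60, 66, 60), rect)
    s.blit(text, (rect.x + 2 * uiScale, rect.y + uiScale))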
load_sheet(file_path)
while running:
windowResize = False
for event in p.event.get():
if event.type == p.QUIT:
running = False
elif event.type == p.MOUSEMOTION:
mousePos = p.mouse.get_pos()
elif event.type == p.MOUSEBUTTONDOWN:
mousePos = p.mouse.get_pos()
if event.button == 2:
mouseOffset = (mousePos[0] - camX, mousePos[1] - camY);
middleClick = True
elif event.button == 1:
leftClick = True
elif event.button == 3:
rightClick = True
rightClickDown = True
elif event.type == p.MOUSEBUTTONUP:
if event.button == 2:
middleClick = False
elif event.button == 1:
leftClick = False
elif event.button == 3:
rightClick = False
elif event.type == p.MOUSEWHEEL and not middleClick:
scale += event.y
if(scale < 1):
scale = 1
elif event.type == p.VIDEORESIZE:
width = event.w
height = event.h
windowResize = True
elif event.type == p.KEYDOWN:
if event.key == p.K_z and p.key.get_mods() & p.KMOD_CTRL:
if len(prevLayerLists) != 0:
prevLayerListsRedo.append(layers)
layers = prevLayerLists[-1]
del prevLayerLists[-1]
elif event.key == p.K_y and p.key.get_mods() & p.KMOD_CTRL:
if len(prevLayerListsRedo) != 0:
prevLayerLists.append(layers)
layers = prevLayerListsRedo[-1]
del prevLayerListsRedo[-1]
elif event.key == p.K_s:
sDown = True
elif event.type == p.KEYUP:
if event.key == p.K_s:
sDown = False
prevLayers = deepcopy(layers)
if middleClick:
camX, camY = mousePos[0] - mouseOffset[0], mousePos[1] - mouseOffset[1]
x = int(round((mousePos[0] - camX) / (tileWidth * scale)))
y = int(round((mousePos[1] - camY) / (tileHeight * scale)))
layers[0][(y * mapWidth) + x] = hand
if leftClick and not sDownStart:
if(mousePos[0] > (9 * uiScale) and mousePos[0] < (sheetWidth + 9) * uiScale and mousePos[1] > (9 * uiScale) and mousePos[1] < (sheetHeight + 9) * uiScale):
x = int(round((mousePos[0] - (9 * uiScale)) / (tileWidth * uiScale)))
y = int(round((mousePos[1] - (9 * uiScale)) / (tileHeight * uiScale)))
hand = (y * (sheetWidth // (tileWidth))) + x
else:
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
elif rightClick and not sDown:
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
s.fill((41,48,50))
renderList = []
for i in range(0, len(layers)):
if not i == 0:
for x in range(mapWidth):
for y in range(mapHeight):
if (x * tileWidth * scale) + camX > tileWidth * -scale and (x * tileWidth * scale) + camX < width and (y * tileHeight * scale) + camY > tileHeight * -scale and (y * tileHeight * scale) + camY < height:
tile = layers[0][y * mapWidth + x]
if not layerStack:
if i == currentLayer and tile != -1 and not [x,y] in renderList:
renderList.append([x,y])
s.blit(p.transform.scale(tiles[tile][0], (tileWidth * scale, tileHeight * scale)), ((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY))
else:
tile = layers[i][y * mapWidth + x]
if not [x,y] in renderList:
if tile == -1 and i == currentLayer:
if uiScale >= scale:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), 1)
else:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), uiScale)
elif tile != -1:
renderList.append([x,y])
s.blit(p.transform.scale(tiles[tile][0], (tileWidth * scale, tileHeight * scale)), ((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY))
else:
if i == currentLayer and tile != -1:
renderList.append([x,y,tile])
else:
tile = layers[i][y * mapWidth + x]
if tile == -1 and i == currentLayer:
if uiScale >= scale:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), 1)
else:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), uiScale)
elif tile != -1:
renderList.append([x,y,tile])
if layerStack:
for i in range(len(renderList)-1, 0, -1):
s.blit(p.transform.scale(tiles[renderList[i][2]][0], (tileWidth * scale, tileHeight * scale)), ((renderList[i][0] * tileWidth * scale) + camX, (renderList[i][1] * tileHeight * scale) + camY))
i = sheetHeight + int(tileHeight * 1.5 + 12)
s.blit(drawBox(sheetWidth + 12, i, True), (3 * uiScale, 3 * uiScale))
drawButton('New Layer', 3 * uiScale, (i + 6) * uiScale)
if buttonClick:
layers.append([-1] * (mapWidth * mapHeight))
currentLayer = len(layers)-1
for layer in range(0, len(layers)-1):
drawButton('Layer ' + str(layer + 1), 3 * uiScale, (i + 26 * (layer + 1)) * uiScale)
if buttonClick:
currentLayer = layer + 1
if buttonHover and rightClickDown and len(layers) > 2:
prevLayerLists.append(deepcopy(layers))
del layers[layer + 1]
if currentLayer > len(layers) - 1:
currentLayer -= 1
prevLayers = layers
for image in tiles:
s.blit(p.transform.scale(image[0], (tileWidth * uiScale, tileHeight * uiScale)), ((image[1] + 9) * uiScale, (image[2] + 9) * uiScale))
s.blit(p.transform.scale(tiles[hand][0], (tileWidth * uiScale, tileHeight * uiScale)), (9 * uiScale, (sheetHeight + tileHeight) * uiScale))
drawButton('Open Tilesheet', (sheetWidth + 18) * uiScale, 3 * uiScale)
if buttonClick:
file_path = tkFileDialog.askopenfilename()
if file_path[-3:] == 'png':
load_sheet(file_path)
drawButton('Layer Stack', (sheetWidth + 18) * uiScale, 23 * uiScale)
if buttonClick:
layerStack = not layerStack
layers[0] = [-1] * (mapWidth * mapHeight)
if not leftClick and leftClickPrev and sDownStart:
sDownStart = False
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
elif leftClick and sDownStart:
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
if not rightClick and rightClickPrev and sDownStart:
sDownStart = False
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
elif rightClick and sDownStart:
# the same rectangle fill for the right-click drag, writing -2 to layer 0
endX = int(round((mousePos[0] - camX) / (tileWidth * scale)))
endY = int(round((mousePos[1] - camY) / (tileHeight * scale)))
for x in range(min(startPos[0], endX), max(startPos[0], endX) + 1):
    for y in range(min(startPos[1], endY), max(startPos[1], endY) + 1):
        if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
            layers[0][(y * mapWidth) + x] = -2
if (leftClick and not leftClickPrev) or (rightClick and not rightClickPrev):
if sDown:
sDownStart = True
startPos = (int(round((mousePos[0] - camX) / (tileWidth * scale))), int(round((mousePos[1] - camY) / (tileHeight * scale))))
if prevLayers != layers:
prevLayerLists.append(deepcopy(prevLayers))
leftClickPrev = leftClick
backDown = False
rightClickDown = False
brushRect.x,brushRect.y = (sheetWidth + 18) * uiScale, 43 * uiScale
if brushRect.collidepoint(mousePos[0], mousePos[1]) or not squareT:
if leftClick and brushRect.collidepoint(mousePos[0], mousePos[1]):
squareT = False
sDown = False
s.blit(p.transform.scale(brushHover, (brushRect.width, brushRect.height)), (brushRect.x, brushRect.y + uiScale))
else:
s.blit(p.transform.scale(brushHover, (brushRect.width, brushRect.height)), brushRect)
else:
s.blit(p.transform.scale(brush, (brushRect.width, brushRect.height)), brushRect)
squareRect.x,squareRect.y = (sheetWidth + 34) * uiScale, 43 * uiScale
if squareRect.collidepoint(mousePos[0], mousePos[1]) or squareT:
if leftClick and squareRect.collidepoint(mousePos[0], mousePos[1]):
squareT = True
s.blit(p.transform.scale(squareHover, (squareRect.width, squareRect.height)), (squareRect.x, squareRect.y + uiScale))
else:
s.blit(p.transform.scale(squareHover, (squareRect.width, squareRect.height)), squareRect)
else:
s.blit(p.transform.scale(square, (squareRect.width, squareRect.height)), squareRect)
if squareT:
sDown = True
rightClickPrev = rightClick
p.display.update()
clock.tick(60)
| 48.794521
| 221
| 0.561623
|
f5f4c4714755e8b9549c5e4949c349f3b753fe90
| 5,148
|
py
|
Python
|
EditGroupWindow.py
|
TheYargonaut/lucre
|
1abd472993df01b443ab4811379dfe52e18cf790
|
[
"MIT"
] | null | null | null |
EditGroupWindow.py
|
TheYargonaut/lucre
|
1abd472993df01b443ab4811379dfe52e18cf790
|
[
"MIT"
] | null | null | null |
EditGroupWindow.py
|
TheYargonaut/lucre
|
1abd472993df01b443ab4811379dfe52e18cf790
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter.colorchooser import askcolor
from tkinter import ttk
from Scrollable import Scrollable
from ViewLedgerWidget import ViewLedgerWidget
from List import ListView
from Group import Group
# window for editing a group
prevLens = [ 10, 25, 100 ]
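# The window class itself was stripped in this dump; a minimal sketch of the
# shape it likely takes given the imports above (the constructor signature,
# widget layout, and Group attributes used here are assumptions, not the
# original code):
class EditGroupWindow(tk.Toplevel):
    def __init__(self, master, group):
        super().__init__(master)
        self.group = group
        self.title('Edit Group')
        ttk.Button(self, text='Color...', command=self.pickColor).pack()
    def pickColor(self):
        # askcolor returns ((r, g, b), '#rrggbb'), or (None, None) on cancel
        rgb, hexColor = askcolor()
        if hexColor:
            self.group.color = hexColor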
| 43.260504
| 165
| 0.633061
|
f5f611d50ecae53133cd83f244cc01c20777a693
| 261
|
py
|
Python
|
day_07/task_1.py
|
Korred/advent_of_code_2021
|
89afcaae3343653106d36fb7ad08558c0fbb4732
|
[
"Unlicense"
] | null | null | null |
day_07/task_1.py
|
Korred/advent_of_code_2021
|
89afcaae3343653106d36fb7ad08558c0fbb4732
|
[
"Unlicense"
] | null | null | null |
day_07/task_1.py
|
Korred/advent_of_code_2021
|
89afcaae3343653106d36fb7ad08558c0fbb4732
|
[
"Unlicense"
] | null | null | null |
crabs = sorted(map(int, open("input.txt", "r").readline().strip().split(",")))
# position with minimal fuel usage is at the median position
median_pos = crabs[len(crabs) // 2]
min_fuel = sum([abs(crab_pos - median_pos) for crab_pos in crabs])
print(min_fuel)
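# Brute-force check of the median claim above (an illustrative addition, not
# part of the original solution): the L1 cost sum(|c - p|) is minimized at a
# median of the positions.
assert min_fuel == min(sum(abs(c - p) for c in crabs)
                       for p in range(min(crabs), max(crabs) + 1))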
| 32.625
| 78
| 0.704981
|
f5f839cc33260b873ad589657cb5b87f8a948df8
| 5,172
|
py
|
Python
|
dialmonkey/nlu/basketball.py
|
alexandergazo/NPFL123
|
c52b6a880abf9fe694ce6a2d775c7db1bd765fba
|
[
"Apache-2.0"
] | null | null | null |
dialmonkey/nlu/basketball.py
|
alexandergazo/NPFL123
|
c52b6a880abf9fe694ce6a2d775c7db1bd765fba
|
[
"Apache-2.0"
] | null | null | null |
dialmonkey/nlu/basketball.py
|
alexandergazo/NPFL123
|
c52b6a880abf9fe694ce6a2d775c7db1bd765fba
|
[
"Apache-2.0"
] | null | null | null |
# Author: Matej Mik
from ..component import Component
from ..da import DAI
import re
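# The component implementation was stripped in this dump; a minimal sketch of
# a rule-based NLU step in this style. The __call__(dial, logger) signature
# and the dialogue-object attributes below are assumptions about the
# framework, not verified dialmonkey API:
class BasketballNLU(Component):
    def __call__(self, dial, logger):
        text = dial.user.lower()
        # one illustrative rule: map a greeting to a 'greet' dialogue act item
        if re.search(r'\b(hi|hello|hey)\b', text):
            dial.nlu.append(DAI(intent='greet'))
        return dial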
| 37.478261
| 97
| 0.552204
|
f5f954fff242094361f8f329de47188d709c63c7
| 1,447
|
py
|
Python
|
test_SSstache.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | 1
|
2018-06-07T17:54:27.000Z
|
2018-06-07T17:54:27.000Z
|
test_SSstache.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | 1
|
2018-06-28T05:08:57.000Z
|
2018-06-28T05:08:57.000Z
|
test_SSstache.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | null | null | null |
from SSstache import *
from plumbum.path.utils import delete
from plumbum.cmd import ls, touch, mkdir
#prepareHTMLdir(dirName='xyz')
#test_makeHTMLdir()
| 27.301887
| 148
| 0.608846
|
f5fc2d7fa7991a4448eb7eb0d16d8da0aa0e1f7e
| 173
|
py
|
Python
|
graphic/introductions/graficoNormal.py
|
jonathanccardoso/data-science
|
d5977e5cd26b6a9ad05ef8940841158911a91586
|
[
"MIT"
] | null | null | null |
graphic/introductions/graficoNormal.py
|
jonathanccardoso/data-science
|
d5977e5cd26b6a9ad05ef8940841158911a91586
|
[
"MIT"
] | null | null | null |
graphic/introductions/graficoNormal.py
|
jonathanccardoso/data-science
|
d5977e5cd26b6a9ad05ef8940841158911a91586
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
x = [1, 2, 5]
y = [2, 3, 7]
plt.title("1 grafico com python")
# Axes
plt.xlabel("Eixo X")
plt.ylabel("Eixo Y")
plt.plot(x,y)
plt.show()
| 12.357143
| 33
| 0.630058
|
f5fc99298c4f8aba96ad5b5882efa8fbf637939b
| 421
|
py
|
Python
|
makevideo.py
|
bitrogen/sorting-algorithms
|
f7eada32db9e0ce385878f49d79b3d6b8c09280a
|
[
"CC0-1.0"
] | null | null | null |
makevideo.py
|
bitrogen/sorting-algorithms
|
f7eada32db9e0ce385878f49d79b3d6b8c09280a
|
[
"CC0-1.0"
] | 1
|
2021-04-05T20:20:30.000Z
|
2021-04-05T20:22:41.000Z
|
makevideo.py
|
bitrogen/sorting-algorithms
|
f7eada32db9e0ce385878f49d79b3d6b8c09280a
|
[
"CC0-1.0"
] | null | null | null |
import cv2
import numpy
import glob
import os
images = []
path = os.getcwd()+"\\frames\\"
myVideo = cv2.VideoWriter("quicksort-1.mkv", cv2.VideoWriter_fourcc(*"DIVX"), 60, (1920,1080))
# iterate frames in numeric order, so frame-10 follows frame-9 rather than
# the lexicographic order a sorted listing would give
for index in range(len(os.listdir(path))):
    img = cv2.imread(f"{path}frame-{index}.png")
    myVideo.write(img)
myVideo.release()
| 20.047619
| 95
| 0.638955
|
f5fce2318bd81cf7ddc8f556365d8f472f7cc726
| 18,008
|
py
|
Python
|
darknet.py
|
sugey/pytorch-yolov3
|
cb6b46fd798debca5d8d066eabb2bd2e6c679953
|
[
"MIT"
] | 3
|
2019-10-21T16:05:15.000Z
|
2019-10-25T00:43:17.000Z
|
darknet.py
|
sugey/pytorch-yolov3
|
cb6b46fd798debca5d8d066eabb2bd2e6c679953
|
[
"MIT"
] | null | null | null |
darknet.py
|
sugey/pytorch-yolov3
|
cb6b46fd798debca5d8d066eabb2bd2e6c679953
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.layers import *
from model.build import *
import cv2
from model.utils import *
model = Darknet("cfg/yolov3.cfg")
model.load_weights("yolov3.weights")
inp = get_test_input()
pred = model(inp, torch.cuda.is_available())
| 44.907731
| 108
| 0.549034
|
f5fd8ae5a3e3e11874751c948747bc877e5305d4
| 1,131
|
py
|
Python
|
src/icemac/addressbook/browser/search/result/handler/test_manager.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | 1
|
2020-03-26T20:16:44.000Z
|
2020-03-26T20:16:44.000Z
|
src/icemac/addressbook/browser/search/result/handler/test_manager.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | 2
|
2020-02-21T13:04:23.000Z
|
2020-02-21T13:06:10.000Z
|
src/icemac/addressbook/browser/search/result/handler/test_manager.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | null | null | null |
from icemac.addressbook.browser.search.result.handler.manager import (
SearchResultHandler)
def makeSRHandler(viewName):
"""Create a `SearchResultHandler` with the specified `viewName`."""
handler = SearchResultHandler(None, None, None, None)
handler.viewName = viewName
return handler
def test_manager__SearchResultHandler____eq____1():
"""It is equal when `viewName` is equal."""
assert makeSRHandler('@@asdf.html') == makeSRHandler('@@asdf.html')
def test_manager__SearchResultHandler____eq____2():
"""It is not equal with unequal `viewName`."""
# There is no __neq__ implemented!
assert not(makeSRHandler('@@foo.html') == makeSRHandler('@@bar.html'))
def test_manager__SearchResultHandler____eq____3():
"""It is not equal to anything else."""
# There is no __neq__ implemented!
assert not(makeSRHandler(None) == object())
def test_manager__SearchResultHandler____hash____1():
"""It is hashable.
    It is only needed on Python 3, where defining __eq__ sets __hash__ to
    None unless __hash__ is defined as well.
"""
assert hash(makeSRHandler(None)) is not None
| 31.416667
| 75
| 0.72237
|
eb03b18815a588a66491abb92833213166f65e34
| 2,271
|
py
|
Python
|
superset/shuju_into_mysql.py
|
LCM1999/superset_secondary_dev
|
293e3df9d46ef6096d35ee7d523ce5c7898902bc
|
[
"Apache-2.0"
] | 1
|
2021-06-29T05:36:30.000Z
|
2021-06-29T05:36:30.000Z
|
superset/shuju_into_mysql.py
|
LCM1999/superset_secondary_dev
|
293e3df9d46ef6096d35ee7d523ce5c7898902bc
|
[
"Apache-2.0"
] | null | null | null |
superset/shuju_into_mysql.py
|
LCM1999/superset_secondary_dev
|
293e3df9d46ef6096d35ee7d523ce5c7898902bc
|
[
"Apache-2.0"
] | null | null | null |
import json
import pymysql
import random
import string
import time
# def get_data():
# with open('E:\\QQ\\1420944066\\FileRecv\\Code (2)\\data\\nice looking data\\gooddata\\20_30(1).json', 'r') as f:
#         camera_text = json.load(f)
# print(camera_text)
# return camera_text
# def data_insert(text):
# db = pymysql.connect(host = "localhost",user = "root",password = "lxyroot",database = "superset-test")
# cur = db.cursor()
# try:
# cur.execute("drop table liutu_data")
# cur.execute("create table liutu_data(id int,name char(20),fillcolor char(20),time char(20),size_data TINYTEXT)")
# except:
# cur.execute("create table liutu_data(id int,name char(20),fillcolor char(20),time char(20),size_data TINYTEXT)")
# for i in text:
# for j in range(0,len(text[0]['size'])):
# sql="INSERT INTO liutu_data (id,name,fillcolor,time,size_data) VALUES ('"+str(i['id'])+"','"+i['name']+"','"+i['fillcolor']+"','"+str(j)+"','"+str(i['size'][j])+"');"
# cur.execute(sql)
# db.commit()
# cur.close()
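# new_table() and data_update() were stripped in this dump; minimal sketches
# consistent with how __main__ uses them below. The table layout mirrors the
# commented-out data_insert above; the generated values are illustrative
# assumptions:
def new_table():
    db = pymysql.connect(host="localhost", user="root", password="lxyroot", database="superset-test")
    cur = db.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS liutu_data"
                "(id int, name char(20), fillcolor char(20), time char(20), size_data TINYTEXT)")
    return cur, db
def data_update(cur, rows, db):
    for i in range(rows):
        name = ''.join(random.choices(string.ascii_lowercase, k=6))
        color = '#' + ''.join(random.choices('0123456789abcdef', k=6))
        sql = ("INSERT INTO liutu_data (id,name,fillcolor,time,size_data) "
               "VALUES (%s,%s,%s,%s,%s)")
        cur.execute(sql, (i, name, color, time.strftime('%H:%M:%S'), str(random.randint(0, 100))))
    db.commit()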
if __name__ == "__main__":
    cur, db = new_table()
    i = 0
    while True:
        time.sleep(5)
        print('one update')
        data_update(cur, 20, db)
        i += 1
| 37.85
| 180
| 0.607221
|
eb03b84ad235ef7df8266830a1654259db309611
| 3,290
|
py
|
Python
|
Experiments/create_mean_optimization_sets.py
|
ariel415el/PerceptualLossGLO-Pytorch
|
7caa743b719cd95066103a69f3e78a70507de8b5
|
[
"MIT"
] | null | null | null |
Experiments/create_mean_optimization_sets.py
|
ariel415el/PerceptualLossGLO-Pytorch
|
7caa743b719cd95066103a69f3e78a70507de8b5
|
[
"MIT"
] | null | null | null |
Experiments/create_mean_optimization_sets.py
|
ariel415el/PerceptualLossGLO-Pytorch
|
7caa743b719cd95066103a69f3e78a70507de8b5
|
[
"MIT"
] | null | null | null |
import os
import random
import cv2
import numpy as np
import torch
from Experiments.all import load_models, embedd_data, save_batch
from GenerativeModels.utils.data_utils import get_dataset
device = torch.device("cuda")
def sample_latent_neighbors(outputs_dir, models_dir):
"""Find nearest latent neighbors of data samples and create sets of original/reconstructed similar images """
# Load models
n = 32
train_dataset = get_dataset('ffhq', split='train', resize=128, val_percent=0.15)
encoder, generator = load_models(device, models_dir)
embeddings = embedd_data(train_dataset, encoder, 32, device)
for i in [11, 15, 16, 25, 48, 53, 60, 67, 68, 78, 122]:
os.makedirs(os.path.join(outputs_dir, os.path.basename(models_dir), f"data_neighbors{i}"), exist_ok=True)
dists = torch.norm(embeddings - embeddings[i], dim=1)
neighbor_indices = torch.argsort(dists)[:n]
neighbors = torch.from_numpy(np.array([train_dataset[x][1] for x in neighbor_indices]))
save_batch(neighbors, os.path.join(outputs_dir, os.path.basename(models_dir), f"data_neighbors{i}"))
if __name__ == '__main__':
# sample_latent_neighbors("latent_neighbors_sets", 'trained_models/VGG-None_PT')
# sample_latent_neighbors("latent_neighbors_sets", 'trained_models/VGG-random')
make_shift_sets('/home/ariel/university/PerceptualLoss/PerceptualLossExperiments/style_transfer/imgs/textures')
# create_shifted_colorfull_box_images()
| 39.166667
| 115
| 0.643161
|
eb03e3a050ceea7bb9cd25f052a0aa3154068c30
| 1,830
|
py
|
Python
|
run-length-encoding/run_length_encoding.py
|
geekmuse/exercism-python
|
089efc0382147bd48f1e2d68c33ba4cbd58d3dfd
|
[
"MIT"
] | null | null | null |
run-length-encoding/run_length_encoding.py
|
geekmuse/exercism-python
|
089efc0382147bd48f1e2d68c33ba4cbd58d3dfd
|
[
"MIT"
] | null | null | null |
run-length-encoding/run_length_encoding.py
|
geekmuse/exercism-python
|
089efc0382147bd48f1e2d68c33ba4cbd58d3dfd
|
[
"MIT"
] | null | null | null |
def decode(to_be_decoded):
"""
Decodes a run-length encoded string.
:param to_be_decoded: run-length encoded string
:return: run-length decoded string
"""
to_be_decoded_list = list(to_be_decoded)
decoded_str_as_list = list()
num_to_print_as_list = list()
for c in to_be_decoded_list:
if c.isdigit():
num_to_print_as_list.append(c)
else:
if len(num_to_print_as_list) > 0:
num_to_print = int(''.join(num_to_print_as_list))
append = c * num_to_print
decoded_str_as_list.append(append)
num_to_print_as_list = list()
else:
decoded_str_as_list.append(c)
return ''.join(decoded_str_as_list)
def encode(to_be_encoded):
"""
Run-length encodes a string
:param to_be_encoded: string to be run-length encoded
:return: run-length encoded string
"""
last_seen = None
last_seen_count = 0
to_be_encoded_as_list = list(to_be_encoded)
encoded_str_as_list = list()
for c in to_be_encoded_as_list:
if last_seen:
if last_seen == c:
last_seen_count += 1
else:
if last_seen_count > 1:
encoded_str_as_list.append('{}{}'.format(last_seen_count, last_seen))
else:
encoded_str_as_list.append('{}'.format(last_seen))
last_seen_count = 1
else:
last_seen_count += 1
last_seen = c
if last_seen_count > 1:
encoded_str_as_list.append('{}{}'.format(last_seen_count, last_seen))
else:
if last_seen:
encoded_str_as_list.append('{}'.format(last_seen))
else:
encoded_str_as_list = list()
return ''.join(encoded_str_as_list)
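# Illustrative round trip (not part of the original file): runs of length one
# are emitted without a count, matching decode's digit handling.
if __name__ == '__main__':
    assert encode('AABCCCD') == '2AB3CD'
    assert decode('2AB3CD') == 'AABCCCD'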
| 30
| 89
| 0.595082
|
eb0791e28d8a88a76f9e3bcff8a0767061c1499e
| 3,816
|
py
|
Python
|
pytorch/benchmarks/operator_benchmark/pt/conv_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | null | null | null |
pytorch/benchmarks/operator_benchmark/pt/conv_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | 1
|
2019-07-22T09:48:46.000Z
|
2019-07-22T09:48:46.000Z
|
pytorch/benchmarks/operator_benchmark/pt/conv_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for Conv1d and ConvTranspose1d operators.
"""
# Configs for conv-1d ops
conv_1d_configs = op_bench.config_list(
attrs=[
[16, 33, 3, 1, 1, 64],
[16, 33, 3, 2, 16, 128],
],
attr_names=[
"in_c", "out_c", "kernel", "stride", "N", "L"
],
tags=["short"]
)
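# The benchmark classes were stripped in this dump; a sketch of the Conv1d
# case in the operator_benchmark style (treat the TorchBenchmarkBase details
# as assumptions; ConvTranspose1d and the 2d/3d cases follow the same pattern
# with nn.ConvTranspose1d, nn.Conv2d, and so on):
class Conv1dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, L):
        # parameter names mirror attr_names in the config above
        self.input = torch.rand(N, in_c, L)
        self.conv1d = nn.Conv1d(in_c, out_c, kernel, stride=stride)
        self.set_module_name('Conv1d')
    def forward(self):
        return self.conv1d(self.input)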
op_bench.generate_pt_test(conv_1d_configs, Conv1dBenchmark)
op_bench.generate_pt_test(conv_1d_configs, ConvTranspose1dBenchmark)
"""
Microbenchmarks for Conv2d and ConvTranspose2d operators.
"""
# Configs for Conv2d and ConvTranspose1d
conv_2d_configs = op_bench.config_list(
attrs=[
[16, 33, 3, 1, 1, 32, 32],
[16, 33, 3, 2, 16, 64, 64],
],
attr_names=[
"in_c", "out_c", "kernel", "stride", "N", "H", "W"
],
tags=["short"]
)
op_bench.generate_pt_test(conv_2d_configs, Conv2dBenchmark)
op_bench.generate_pt_test(conv_2d_configs, ConvTranspose2dBenchmark)
"""
Microbenchmarks for Conv3d and ConvTranspose3d operators.
"""
# Configs for Conv3d and ConvTranspose3d
conv_3d_configs = op_bench.config_list(
attrs=[
[16, 33, 3, 1, 8, 4, 32, 32],
[16, 33, 3, 2, 16, 8, 64, 64],
],
attr_names=[
"in_c", "out_c", "kernel", "stride", "N", "D", "H", "W"
],
tags=["short"]
)
op_bench.generate_pt_test(conv_3d_configs, Conv3dBenchmark)
op_bench.generate_pt_test(conv_3d_configs, ConvTranspose3dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| 27.453237
| 85
| 0.673742
|
eb083967d51239e917a7b39eeaa1d72f732ba81d
| 1,605
|
py
|
Python
|
local_test/course_search/nyuapi/request.py
|
NYUSHer/Widgets
|
b630d01331ca0101778fc7ca44fff7b65412f9ef
|
[
"MIT"
] | 1
|
2018-05-01T06:04:39.000Z
|
2018-05-01T06:04:39.000Z
|
local_test/course_search/nyuapi/request.py
|
NYUSHer/Widgets
|
b630d01331ca0101778fc7ca44fff7b65412f9ef
|
[
"MIT"
] | null | null | null |
local_test/course_search/nyuapi/request.py
|
NYUSHer/Widgets
|
b630d01331ca0101778fc7ca44fff7b65412f9ef
|
[
"MIT"
] | null | null | null |
import requests as R
| 32.1
| 87
| 0.544548
|
eb0939a06759c9dcb9a5c2eda6614c361061cde9
| 857
|
py
|
Python
|
pySINGLE/setup.py
|
piomonti/pySINGLE
|
2eae0b31334d8eae08fd7f96f591262c4abcf3d9
|
[
"MIT"
] | 3
|
2015-12-21T15:14:08.000Z
|
2018-12-29T10:15:03.000Z
|
pySINGLE/setup.py
|
piomonti/pySINGLE
|
2eae0b31334d8eae08fd7f96f591262c4abcf3d9
|
[
"MIT"
] | null | null | null |
pySINGLE/setup.py
|
piomonti/pySINGLE
|
2eae0b31334d8eae08fd7f96f591262c4abcf3d9
|
[
"MIT"
] | null | null | null |
#from distutils.core import setup
#from distutils.extension import Extension
#from Cython.Distutils import build_ext
#import numpy
#setup(
#cmdclass = {'build_ext': build_ext},
#ext_modules = [Extension("Z_shooting", ["Z_shooting.c"],)],
#include_dirs=[numpy.get_include(),'.', ],
#)
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
#extension = [Extension("Z_shooting", ["Z_shooting.c"],),]
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("FastFused_01", ["FastFused_01.pyx"], include_dirs=[numpy.get_include()])]
)
#setup(
#cmdclass = {'build_ext': build_ext},
#ext_modules = cythonize("FastFused_01.pyx"),
#include_dirs=[numpy.get_include(),'.', ],
#)
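# Typical build invocation for an in-place Cython extension (standard
# distutils usage):
#   python setup.py build_ext --inplace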
| 27.645161
| 104
| 0.687281
|
eb0a67e0dac6431fa8a950d7b99db76a91a069c7
| 11,877
|
py
|
Python
|
cnnlstm/preprocessing.py
|
mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020
|
6467a08640990f2d07e517adf7bacd566fb442c4
|
[
"MIT"
] | null | null | null |
cnnlstm/preprocessing.py
|
mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020
|
6467a08640990f2d07e517adf7bacd566fb442c4
|
[
"MIT"
] | null | null | null |
cnnlstm/preprocessing.py
|
mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020
|
6467a08640990f2d07e517adf7bacd566fb442c4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from data_processing.helpers import Config
| 46.214008
| 126
| 0.667088
|
eb0ac6a6f7fdd1cf17fa0a0d491c03fde96fdfc1
| 331
|
py
|
Python
|
Physics250-ME3738/timeIntervalBlinks.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME3738/timeIntervalBlinks.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME3738/timeIntervalBlinks.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
import math
speedofLight = 2.9979*pow(10,8)
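# The function body was stripped in this dump; a sketch of a special-relativity
# time-dilation calculation consistent with the constant above (the prompts,
# and treating the blink interval as a proper time, are assumptions):
def timeIntervalBlinks():
    properTime = float(input("Proper time between blinks (s): "))
    beta = float(input("Speed as a fraction of c: "))
    gamma = 1.0 / math.sqrt(1.0 - beta ** 2)
    print("Observed interval:", gamma * properTime, "s")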
timeIntervalBlinks()
| 18.388889
| 56
| 0.592145
|
eb1051fc036fc84c631af126f696c0417323ff9f
| 419
|
py
|
Python
|
daemons/area-deletion-daemon/app.py
|
sampierson/upload-service
|
b7c470706f729bdee34a4254555f798558877095
|
[
"MIT"
] | 6
|
2018-01-31T19:44:17.000Z
|
2020-02-20T13:03:09.000Z
|
daemons/area-deletion-daemon/app.py
|
sampierson/upload-service
|
b7c470706f729bdee34a4254555f798558877095
|
[
"MIT"
] | 379
|
2018-03-21T21:29:15.000Z
|
2020-01-28T14:20:48.000Z
|
daemons/area-deletion-daemon/app.py
|
HumanCellAtlas/staging-service
|
b7c470706f729bdee34a4254555f798558877095
|
[
"MIT"
] | 5
|
2018-03-09T14:13:15.000Z
|
2020-01-30T15:49:46.000Z
|
import json
from upload.common.upload_area import UploadArea
# This lambda function is invoked by messages in the area_deletion_queue (AWS SQS).
# The queue and the lambda function are connected via aws_lambda_event_source_mapping
| 38.090909
| 87
| 0.785203
|
eb10a721ce3034d767a4ccb9040dc682a3ffb0b4
| 3,089
|
py
|
Python
|
engine/geometry/tests/test_overlap_detection_2d.py
|
codehearts/pickles-fetch-quest
|
ca9b3c7fe26acb50e1e2d654d068f5bb953bc427
|
[
"MIT"
] | 3
|
2017-12-07T19:17:36.000Z
|
2021-07-29T18:24:25.000Z
|
engine/geometry/tests/test_overlap_detection_2d.py
|
codehearts/pickles-fetch-quest
|
ca9b3c7fe26acb50e1e2d654d068f5bb953bc427
|
[
"MIT"
] | 41
|
2017-11-11T06:00:08.000Z
|
2022-03-28T23:27:25.000Z
|
engine/geometry/tests/test_overlap_detection_2d.py
|
codehearts/pickles-fetch-quest
|
ca9b3c7fe26acb50e1e2d654d068f5bb953bc427
|
[
"MIT"
] | 2
|
2018-08-31T23:49:00.000Z
|
2021-09-21T00:42:48.000Z
|
from ..overlap_detection_2d import detect_overlap_2d
from unittest.mock import call, Mock, patch
import unittest
| 42.315068
| 76
| 0.642279
|
eb10c1e56faa83018c15d8d04331071eb6bc524c
| 786
|
py
|
Python
|
PythonTest/Aula18A.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
PythonTest/Aula18A.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
PythonTest/Aula18A.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
teste = list()
teste.append('Matheus')
teste.append(17)
galera = [teste[:]] # creates a copy of teste inside galera
teste[0] = 'Oliver'
teste[1] = 22
galera.append(teste) # appends a reference, linking teste and galera
print(galera)
pessoas = [['Harvey', 23], ['Madeleine', 19], ['Roger', 250], ['Mark', 20]]
print(pessoas[0][0]) # shows the first value of the first list within this list
for p in pessoas:
print(f'{p[0]} tem {p[1]} anos de idade.')
dados = []
pes = []
for i in range(0, 3):
print('-='*10)
dados.append(input('Nome: '))
dados.append(int(input('Idade: ')))
pes.append(dados[:])
    dados.clear() # clears the values inside dados
for p in pes:
    print(f'{p[0]} é maior de idade.' if p[1] > 20 else f'{p[0]} é menor de idade.')
# Exercises 84-89
| 27.103448
| 84
| 0.624682
|
eb15265b18824ec201a18ed59e673270072c7e83
| 756
|
py
|
Python
|
src/bio2bel/exc.py
|
aman527/bio2bel
|
631328261a8e7ebddf2eab6c271bc4bc42fbcba4
|
[
"MIT"
] | 16
|
2018-05-18T13:25:44.000Z
|
2022-03-15T02:32:28.000Z
|
src/bio2bel/exc.py
|
aman527/bio2bel
|
631328261a8e7ebddf2eab6c271bc4bc42fbcba4
|
[
"MIT"
] | 42
|
2017-09-13T20:16:46.000Z
|
2021-05-08T19:24:30.000Z
|
src/bio2bel/exc.py
|
aman527/bio2bel
|
631328261a8e7ebddf2eab6c271bc4bc42fbcba4
|
[
"MIT"
] | 5
|
2020-03-14T17:08:12.000Z
|
2021-04-13T20:19:19.000Z
|
# -*- coding: utf-8 -*-
"""Bio2BEL custom errors."""
| 31.5
| 111
| 0.747354
|
eb17d457b2e3da5e9c6ce129bda974e0910d6212
| 1,967
|
py
|
Python
|
tencentcloud/cat/v20180409/errorcodes.py
|
HS-Gray/tencentcloud-sdk-python
|
b28b19c4beebc9f361aa3221afa36ad1ee047ccc
|
[
"Apache-2.0"
] | 37
|
2017-10-12T01:50:42.000Z
|
2022-02-24T02:44:45.000Z
|
tencentcloud/cat/v20180409/errorcodes.py
|
HS-Gray/tencentcloud-sdk-python
|
b28b19c4beebc9f361aa3221afa36ad1ee047ccc
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/cat/v20180409/errorcodes.py
|
HS-Gray/tencentcloud-sdk-python
|
b28b19c4beebc9f361aa3221afa36ad1ee047ccc
|
[
"Apache-2.0"
] | 12
|
2018-07-31T10:04:56.000Z
|
2022-02-07T00:08:06.000Z
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Database query failed.
FAILEDOPERATION_DBQUERYFAILED = 'FailedOperation.DbQueryFailed'
# Failed to create the database record.
FAILEDOPERATION_DBRECORDCREATEFAILED = 'FailedOperation.DbRecordCreateFailed'
# Failed to update the database record.
FAILEDOPERATION_DBRECORDUPDATEFAILED = 'FailedOperation.DbRecordUpdateFailed'
# ES query error.
FAILEDOPERATION_ESQUERYERROR = 'FailedOperation.ESQueryError'
# No valid nodes are available.
FAILEDOPERATION_NOVALIDNODES = 'FailedOperation.NoValidNodes'
# The order is out of credit.
FAILEDOPERATION_ORDEROUTOFCREDIT = 'FailedOperation.OrderOutOfCredit'
# The resource was not found.
FAILEDOPERATION_RESOURCENOTFOUND = 'FailedOperation.ResourceNotFound'
# The task is not running.
FAILEDOPERATION_TASKNOTRUNNING = 'FailedOperation.TaskNotRunning'
# The task is not suspended.
FAILEDOPERATION_TASKNOTSUSPENDED = 'FailedOperation.TaskNotSuspended'
# The task operation is not allowed.
FAILEDOPERATION_TASKOPERATIONNOTALLOW = 'FailedOperation.TaskOperationNotAllow'
# The task types are not the same.
FAILEDOPERATION_TASKTYPENOTSAME = 'FailedOperation.TaskTypeNotSame'
# The trial task quota has been exceeded.
FAILEDOPERATION_TRIALTASKEXCEED = 'FailedOperation.TrialTaskExceed'
# Internal error.
INTERNALERROR = 'InternalError'
# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# The resource was not found.
RESOURCENOTFOUND = 'ResourceNotFound'
# Unknown parameter.
UNKNOWNPARAMETER = 'UnknownParameter'
| 26.945205
| 82
| 0.804779
|
eb198ab0970d6a0544631c5838cde74baffd6fdc
| 8,851
|
py
|
Python
|
Website/Ogre/points/tests.py
|
pringyy/OGRE
|
26bd1bc06e1d14129b8922da8c9bb7f21b7ec457
|
[
"MIT"
] | null | null | null |
Website/Ogre/points/tests.py
|
pringyy/OGRE
|
26bd1bc06e1d14129b8922da8c9bb7f21b7ec457
|
[
"MIT"
] | null | null | null |
Website/Ogre/points/tests.py
|
pringyy/OGRE
|
26bd1bc06e1d14129b8922da8c9bb7f21b7ec457
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.contrib.staticfiles import finders
from django.urls import reverse
from .models import StudentProfileInfo, User
from .forms import UserForm, ContactForm, UserProfileInfoForm
| 35.979675
| 119
| 0.711219
|
eb1aab5b6a3a998c629d8d9ed3c85dc9531c3cbf
| 6,248
|
py
|
Python
|
py2.5/processing/reduction.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 356
|
2015-06-21T21:05:10.000Z
|
2022-03-30T11:57:08.000Z
|
py2.5/processing/reduction.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 103
|
2015-06-22T01:44:14.000Z
|
2022-03-01T03:44:25.000Z
|
py2.5/processing/reduction.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 72
|
2015-09-02T14:10:24.000Z
|
2022-03-25T06:49:43.000Z
|
#
# Module to support the pickling of different types of connection
# objects and file objects so that they can be transferred between
# different processes.
#
# processing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = []
import os
import sys
import socket
import threading
import copy_reg
import processing
from processing import _processing
from processing.logger import debug, subDebug, subWarning
from processing.forking import thisThreadIsSpawning
from processing.process import _registerAfterFork
#
#
#
connections_are_picklable = (
sys.platform == 'win32' or hasattr(_processing, 'recvFd')
)
try:
    fromfd = socket.fromfd
except AttributeError:
    # the fallback definition was stripped in this dump; a conservative
    # stand-in so the module still parses (an assumption, not the original)
    def fromfd(fd, family, type_, proto=0):
        raise NotImplementedError('socket.fromfd is unavailable on this platform')
#
# Platform specific definitions
#
if sys.platform == 'win32':
import _subprocess
from processing._processing import win32
closeHandle = win32.CloseHandle
else:
closeHandle = os.close
duplicateHandle = os.dup
#
# Support for a per-process server thread which caches pickled handles
#
_cache = set()
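# _reset() was stripped in this dump; a sketch consistent with its use just
# below, re-initialising per-process state after a fork (the lock and listener
# globals are assumptions):
def _reset(obj):
    global _lock, _listener
    for handle in _cache:
        closeHandle(handle)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None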
_reset(None)
_registerAfterFork(_reset, _reset)
#
# Functions to be used for pickling/unpickling objects with handles
#
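# The reduce/rebuild helpers were stripped in this dump. A copy_reg reducer
# must return a (rebuild_callable, args) pair; the sketch below shows that
# pattern for connections only (the Connection constructor arguments and the
# handle transfer are assumptions; reduceSocket and reducePipeConnection,
# registered below, would follow the same pattern):
def rebuildConnection(handle, readable, writable):
    return _processing.Connection(handle, readable=readable, writable=writable)
def reduceConnection(conn):
    return rebuildConnection, (duplicateHandle(conn.fileno()), conn.readable, conn.writable)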
#
# Register `_processing.Connection` with `copy_reg`
#
copy_reg.pickle(_processing.Connection, reduceConnection)
#
# Register `socket.socket` with `copy_reg`
#
copy_reg.pickle(socket.socket, reduceSocket)
#
# Register `_processing.PipeConnection` with `copy_reg`
#
if sys.platform == 'win32':
copy_reg.pickle(_processing.PipeConnection, reducePipeConnection)
| 28.52968
| 80
| 0.639725
|
eb1afd11fd2f6d89e9d5a3d5e84072981f86d593
| 570
|
py
|
Python
|
data-structures/print-the-elements-of-a-linked-list-in-reverse.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 340
|
2018-06-17T19:45:56.000Z
|
2022-03-22T02:26:15.000Z
|
data-structures/print-the-elements-of-a-linked-list-in-reverse.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 3
|
2021-02-02T17:17:29.000Z
|
2021-05-18T10:06:04.000Z
|
data-structures/print-the-elements-of-a-linked-list-in-reverse.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 229
|
2019-04-20T08:28:49.000Z
|
2022-03-31T04:23:52.000Z
|
"""
Print the elements of a linked list in reverse order to standard output.
head may be None, representing an empty list.
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
| 16.285714
| 68
| 0.522807
|
eb1bfe5091ca2f0f84f38e9d762348c024630c00
| 9,088
|
py
|
Python
|
cfd/cfd_rel_perms.py
|
lanetszb/vofpnm
|
520544db894fb13e44a86e989bd17b4690e996d3
|
[
"MIT"
] | null | null | null |
cfd/cfd_rel_perms.py
|
lanetszb/vofpnm
|
520544db894fb13e44a86e989bd17b4690e996d3
|
[
"MIT"
] | null | null | null |
cfd/cfd_rel_perms.py
|
lanetszb/vofpnm
|
520544db894fb13e44a86e989bd17b4690e996d3
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Aleksandr Zhuravlyov and Zakhar Lanets
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import json
import pandas as pd
import copy
import matplotlib.pyplot as plt
import time as tm
from matplotlib import rc
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../../'))
from netgrid import save_files_collection_to_file
from matplotlib.ticker import FormatStrFormatter
from vofpnm.cfd.ini_class import Ini
from vofpnm.cfd.cfd_class import Cfd
from vofpnm.helpers import plot_rel_perms, plot_conesrvation_check, plot_viscs_vels, plot_av_sat, \
plot_capillary_pressure_curve, plot_capillary_pressures
# rc('text', usetex=True)
# plt.rcParams["font.family"] = "Times New Roman"
start_time = tm.time()
ini = Ini(config_file=sys.argv[1])
cfd = Cfd(ini)
visc_0 = ini.paramsPnm['visc_0']
visc_1 = ini.paramsPnm['visc_1']
ini.throats_viscs = np.tile(visc_0, ini.netgrid.throats_N)
cfd.run_pnm()
throats_volumes = cfd.ini.throats_volumes
# ### validation with openFoam ###
test_case_vofpnm = dict()
times_alpha_avs = dict()
times_u_mgn_avs = dict()
times_F_avs = dict()
times_F_avs_new = dict()
times_V_in = dict()
thrs_velocities_to_output = dict()
thrs_alphas_to_output = dict()
nus = {'1': visc_0, '2': visc_1}
rhos = {'1': ini.paramsPnm['b_dens_fluid1'], '2': ini.paramsPnm['b_dens_fluid1']}
test_case_vofpnm['mus'] = nus
test_case_vofpnm['rhos'] = rhos
test_case_vofpnm['sigma'] = ini.ift
# ### validation with openfoam one-phase ###
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
u_mgn_av = np.sum((throats_volumes * throats_vels)) / np.sum(throats_volumes)
test_case_vofpnm['ref_u_mgn'] = u_mgn_av
print('ref_u_mgn', u_mgn_av)
throats_widths = np.absolute(np.array(list(cfd.ini.throats_widths.values())))
av_width = np.sum((throats_volumes * throats_widths)) / np.sum(throats_volumes)
test_case_vofpnm['width'] = av_width
ini.flow_0_ref = cfd.calc_rel_flow_rate()
print('flow_0_ref', ini.flow_0_ref)
visc_1 = ini.paramsPnm['visc_1']
ini.throats_viscs = np.tile(visc_1, ini.netgrid.throats_N)
cfd.run_pnm()
ini.flow_1_ref = cfd.calc_rel_flow_rate()
cfd.calc_coupling_params()
cfd.run_pnm()
rel_perms_0 = []
rel_perms_1 = []
capillary_numbers = []
capillary_pressures = []
av_sats = []
throats_volumes = cfd.ini.throats_volumes
throats_av_sats = cfd.ini.equation.throats_av_sats
dens_0 = cfd.ini.paramsPnm['dens_0']
mass_already_in = copy.deepcopy(np.sum(throats_volumes * throats_av_sats * dens_0))
mass_rates_in = []
mass_rates_out = []
masses_inside = []
times = []
viscs = []
vol_rates_in = []
vol_rates_out = []
#################
# Paraview output
#################
os.system('rm -r inOut/*.vtu')
os.system('rm -r inOut/*.pvd')
sats_dict = dict()
file_name = 'inOut/collection.pvd'
files_names = list()
files_descriptions = list()
cells_arrays = cfd.process_paraview_data()
cfd.ini.netgrid.cells_arrays = cells_arrays
files_names.append(str(0) + '.vtu')
files_descriptions.append(str(0))
cfd.ini.netgrid.save_cells('inOut/' + files_names[-1])
save_files_collection_to_file(file_name, files_names, files_descriptions)
#################
time = [0]
time_steps = []
cour_number = np.empty([])
time_curr = 0
time_step_curr = 0
time_output_freq = cfd.ini.time_period / 500.
round_output_time = int(ini.round_output_time)
output_time_step = ini.output_time_step
time_bound = output_time_step
is_output_step = False
is_last_step = False
out_idx = int(0)
while True:
if cfd.ini.time_step_type == 'const':
cfd.ini.time_step = cfd.ini.const_time_step
elif cfd.ini.time_step_type == 'flow_variable':
cfd.ini.time_step = cfd.ini.local.calc_flow_variable_time_step(
cfd.ini.throats_velocities)
elif cfd.ini.time_step_type == 'div_variable':
cfd.ini.time_step = cfd.ini.local.calc_div_variable_time_step(
cfd.ini.equation.sats[cfd.ini.equation.i_curr], cfd.ini.throats_velocities)
time_step_curr = cfd.ini.time_step
if time_curr + time_step_curr >= time_bound:
time_step_curr = time_bound - time_curr
time_bound += output_time_step
is_output_step = True
if time_curr + time_step_curr >= cfd.ini.time_period:
is_last_step = True
if not is_output_step:
time_step_curr = cfd.ini.time_period - time_curr
time_steps.append(time_step_curr)
time_curr += time_step_curr
cfd.ini.equation.cfd_procedure_one_step(cfd.ini.throats_velocities, time_step_curr)
cfd.calc_coupling_params()
mass_inside = copy.deepcopy(np.sum(throats_volumes * throats_av_sats * dens_0))
masses_inside.append(mass_inside)
vol_rate_in, vol_rate_out, vol_rate_in_0, vol_rate_out_1 = cfd.calc_flow_rates(mass_rates_in,
mass_rates_out)
vol_rates_out.append(vol_rate_out_1)
cfd.calc_rel_perms(rel_perms_0, rel_perms_1, capillary_numbers, capillary_pressures,
av_sats, ini.flow_0_ref, ini.flow_1_ref, vol_rate_in_0)
print('time_step: ', round(time_step_curr, round_output_time))
time.append(time_curr)
cfd.ini.equation.print_cour_numbers(cfd.ini.throats_velocities, cfd.ini.time_step)
print(' percentage executed:', round((time_curr / cfd.ini.time_period * 100.), 2), '%.', '\n')
cfd.run_pnm()
cells_arrays = cfd.process_paraview_data()
if is_output_step:
cfd.ini.netgrid.cells_arrays = cells_arrays
files_names.append(str(round(time_curr, round_output_time)) + '.vtu')
files_descriptions.append(str(round(time_curr, round_output_time)))
cfd.ini.netgrid.save_cells('inOut/' + files_names[-1])
save_files_collection_to_file(file_name, files_names, files_descriptions)
out_idx += 1
is_output_step = False
####### validation with openfoam #######
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
u_mgn_av = np.sum(throats_volumes * throats_vels) / np.sum(throats_volumes)
alpha_av = np.sum(throats_volumes * throats_av_sats) / np.sum(throats_volumes)
F_av = np.sum(throats_volumes * throats_vels * throats_av_sats) / np.sum(
throats_volumes * throats_vels)
times_u_mgn_avs[str(round(time_curr, round_output_time))] = u_mgn_av
times_alpha_avs[str(round(time_curr, round_output_time))] = alpha_av
times_F_avs[str(round(time_curr, round_output_time))] = F_av
times_F_avs_new[str(round(time_curr, round_output_time))] = (
vol_rate_out - vol_rate_out_1) / vol_rate_out
times_V_in[str(round(time_curr, round_output_time))] = vol_rate_in
####### validation with openfoam #######
print(str(round(time_curr, round_output_time)), time_curr)
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
throats_viscs = cfd.ini.throats_viscs
visc = np.sum(cfd.ini.throats_volumes * throats_viscs) / np.sum(cfd.ini.throats_volumes)
times.append(time_curr)
viscs.append(visc)
vol_rates_in.append(vol_rate_in)
if is_last_step:
break
execution_time = tm.time() - start_time
print("--- %s seconds ---" % execution_time)
#############
# Rel perms validation output
#############
test_case_vofpnm['times_alpha_avs'] = times_alpha_avs
test_case_vofpnm['times_u_mgn_avs'] = times_u_mgn_avs
test_case_vofpnm['times_F_avs'] = times_F_avs
test_case_vofpnm['times_F_avs_new'] = times_F_avs_new
test_case_vofpnm['execution_time'] = execution_time
test_case_vofpnm['time_step'] = cfd.ini.output_time_step
test_case_vofpnm['grid_volume'] = cfd.ini.grid_volume
test_case_vofpnm['total_volume'] = np.sum(throats_volumes)
test_case_vofpnm['times_V_in'] = times_V_in
json_file_u_mgns = 'inOut/validation/tmp.json'
with open(json_file_u_mgns, 'w') as f:
json.dump(test_case_vofpnm, f, sort_keys=False, indent=4 * ' ', ensure_ascii=False)
| 36.943089
| 125
| 0.725682
|
eb1c6dbf88d8c9f286de25fdab3e7030a01a795a
| 10,209
|
py
|
Python
|
python/gameduino/base.py
|
Godzil/gameduino
|
3a9d04b9820ca7edb04df4583fa14300e913fdb9
|
[
"BSD-3-Clause"
] | 17
|
2016-09-13T09:11:03.000Z
|
2020-09-30T03:31:15.000Z
|
python/gameduino/base.py
|
lambdamikel/gameduino
|
3a9d04b9820ca7edb04df4583fa14300e913fdb9
|
[
"BSD-3-Clause"
] | null | null | null |
python/gameduino/base.py
|
lambdamikel/gameduino
|
3a9d04b9820ca7edb04df4583fa14300e913fdb9
|
[
"BSD-3-Clause"
] | 12
|
2017-07-03T21:57:41.000Z
|
2021-11-02T17:47:23.000Z
|
import struct
ascii_glyphs = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, 0x00,
0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x7f, 0x36, 0x7f, 0x36, 0x36, 0x00,
0x0c, 0x3f, 0x68, 0x3e, 0x0b, 0x7e, 0x18, 0x00, 0x60, 0x66, 0x0c, 0x18, 0x30, 0x66, 0x06, 0x00,
0x38, 0x6c, 0x6c, 0x38, 0x6d, 0x66, 0x3b, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0c, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0c, 0x00, 0x30, 0x18, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00,
0x00, 0x18, 0x7e, 0x3c, 0x7e, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00,
0x3c, 0x66, 0x6e, 0x7e, 0x76, 0x66, 0x3c, 0x00, 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7e, 0x00,
0x3c, 0x66, 0x06, 0x0c, 0x18, 0x30, 0x7e, 0x00, 0x3c, 0x66, 0x06, 0x1c, 0x06, 0x66, 0x3c, 0x00,
0x0c, 0x1c, 0x3c, 0x6c, 0x7e, 0x0c, 0x0c, 0x00, 0x7e, 0x60, 0x7c, 0x06, 0x06, 0x66, 0x3c, 0x00,
0x1c, 0x30, 0x60, 0x7c, 0x66, 0x66, 0x3c, 0x00, 0x7e, 0x06, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x00,
0x3c, 0x66, 0x66, 0x3c, 0x66, 0x66, 0x3c, 0x00, 0x3c, 0x66, 0x66, 0x3e, 0x06, 0x0c, 0x38, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30,
0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x7e, 0x00, 0x00, 0x00,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x3c, 0x66, 0x0c, 0x18, 0x18, 0x00, 0x18, 0x00,
0x3c, 0x66, 0x6e, 0x6a, 0x6e, 0x60, 0x3c, 0x00, 0x3c, 0x66, 0x66, 0x7e, 0x66, 0x66, 0x66, 0x00,
0x7c, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x7c, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3c, 0x00,
0x78, 0x6c, 0x66, 0x66, 0x66, 0x6c, 0x78, 0x00, 0x7e, 0x60, 0x60, 0x7c, 0x60, 0x60, 0x7e, 0x00,
0x7e, 0x60, 0x60, 0x7c, 0x60, 0x60, 0x60, 0x00, 0x3c, 0x66, 0x60, 0x6e, 0x66, 0x66, 0x3c, 0x00,
0x66, 0x66, 0x66, 0x7e, 0x66, 0x66, 0x66, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7e, 0x00,
0x3e, 0x0c, 0x0c, 0x0c, 0x0c, 0x6c, 0x38, 0x00, 0x66, 0x6c, 0x78, 0x70, 0x78, 0x6c, 0x66, 0x00,
0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7e, 0x00, 0x63, 0x77, 0x7f, 0x6b, 0x6b, 0x63, 0x63, 0x00,
0x66, 0x66, 0x76, 0x7e, 0x6e, 0x66, 0x66, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00,
0x7c, 0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x6a, 0x6c, 0x36, 0x00,
0x7c, 0x66, 0x66, 0x7c, 0x6c, 0x66, 0x66, 0x00, 0x3c, 0x66, 0x60, 0x3c, 0x06, 0x66, 0x3c, 0x00,
0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x00, 0x63, 0x63, 0x6b, 0x6b, 0x7f, 0x77, 0x63, 0x00,
0x66, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0x66, 0x00, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x00,
0x7e, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x7e, 0x00, 0x7c, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7c, 0x00,
0x00, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x00, 0x00, 0x3e, 0x06, 0x06, 0x06, 0x06, 0x06, 0x3e, 0x00,
0x18, 0x3c, 0x66, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x1c, 0x36, 0x30, 0x7c, 0x30, 0x30, 0x7e, 0x00, 0x00, 0x00, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
0x60, 0x60, 0x7c, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x66, 0x3c, 0x00,
0x06, 0x06, 0x3e, 0x66, 0x66, 0x66, 0x3e, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
0x1c, 0x30, 0x30, 0x7c, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x3e, 0x66, 0x66, 0x3e, 0x06, 0x3c,
0x60, 0x60, 0x7c, 0x66, 0x66, 0x66, 0x66, 0x00, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3c, 0x00,
0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x70, 0x60, 0x60, 0x66, 0x6c, 0x78, 0x6c, 0x66, 0x00,
0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x36, 0x7f, 0x6b, 0x6b, 0x63, 0x00,
0x00, 0x00, 0x7c, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x7c, 0x66, 0x66, 0x7c, 0x60, 0x60, 0x00, 0x00, 0x3e, 0x66, 0x66, 0x3e, 0x06, 0x07,
0x00, 0x00, 0x6c, 0x76, 0x60, 0x60, 0x60, 0x00, 0x00, 0x00, 0x3e, 0x60, 0x3c, 0x06, 0x7c, 0x00,
0x30, 0x30, 0x7c, 0x30, 0x30, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x00,
0x00, 0x00, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x63, 0x6b, 0x6b, 0x7f, 0x36, 0x00,
0x00, 0x00, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x3c,
0x00, 0x00, 0x7e, 0x0c, 0x18, 0x30, 0x7e, 0x00, 0x0c, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0c, 0x00,
0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x00, 0x30, 0x18, 0x18, 0x0e, 0x18, 0x18, 0x30, 0x00,
0x31, 0x6b, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
]
from gameduino.registers import *
# BaseGameduino is the common base for the Gameduino objects in remote and sim
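# A hedged sketch of the shape such a base class takes, since the body was
# stripped in this dump (wrstr as the abstract byte-write primitive is an
# assumption, not the verified library API):
class BaseGameduino(object):
    def wr(self, addr, v):
        # write a single byte register
        self.wrstr(addr, struct.pack("B", v))
    def wr16(self, addr, v):
        # write a 16-bit little-endian register
        self.wrstr(addr, struct.pack("<H", v))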
| 49.081731
| 134
| 0.54687
|
eb1e1aaec21c57363587a62326c0cc891182c577
| 183
|
py
|
Python
|
sdm/__init__.py
|
DarthNoxix/noxixcogs
|
794571b7d155e40f6bfb6ba7c31b0a7f025e3d59
|
[
"MIT"
] | null | null | null |
sdm/__init__.py
|
DarthNoxix/noxixcogs
|
794571b7d155e40f6bfb6ba7c31b0a7f025e3d59
|
[
"MIT"
] | null | null | null |
sdm/__init__.py
|
DarthNoxix/noxixcogs
|
794571b7d155e40f6bfb6ba7c31b0a7f025e3d59
|
[
"MIT"
] | null | null | null |
from .sdm import Sdm
__red_end_user_data_statement__ = (
"This cog does not persistently store data or metadata about users."
)
| 18.3
| 72
| 0.73224
|
eb1e990c875a84c89463cedf50afc813143a16f2
| 1,330
|
py
|
Python
|
GUI/WifiMonitor/UDP/Utils/gpio_mapping.py
|
gchinellato/XD
|
f6c0134030c5e229a7b9c2621311c5204aed77af
|
[
"MIT"
] | 1
|
2019-10-15T20:31:39.000Z
|
2019-10-15T20:31:39.000Z
|
GUI/WifiMonitor/Utils/gpio_mapping.py
|
gchinellato/XD
|
f6c0134030c5e229a7b9c2621311c5204aed77af
|
[
"MIT"
] | null | null | null |
GUI/WifiMonitor/Utils/gpio_mapping.py
|
gchinellato/XD
|
f6c0134030c5e229a7b9c2621311c5204aed77af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
*************************************************
* @Project: Self Balance
* @Description: GPIO Mapping
* @Owner: Guilherme Chinellato
* @Email: guilhermechinellato@gmail.com
*************************************************
"""
"""
#
#Arduino GPIO
#
4x encoder (INT0-D2, INT1-D3, D4, D7)
4x motor enable (D5, D6, D11, D12)
2x PWM (D9, D10)
2x I2C (SCL-A5, SDA-A4)
"""
'''
Deprecated (replaced to Arduino)
#
#Motors GPIOs
#
#Motor A & B PWM outputs (BCM pinout)
MA_PWM_GPIO = 19
MB_PWM_GPIO = 26
#Motor A & B enable outputs (BCM pinout)
MA_CLOCKWISE_GPIO = 5
MA_ANTICLOCKWISE_GPIO = 6
MB_CLOCKWISE_GPIO = 20
MB_ANTICLOCKWISE_GPIO = 21
#
#Encoders GPIOs
#
#Enconders 1 & 2 for each motor (BCM pinout)
MA_ENCODER_1 = 12
MA_ENCODER_2 = 13
MB_ENCODER_1 = 7
MB_ENCODER_2 = 8
'''
#
#PanTilt GPIOs
#
#MicroServo Vertical and Horizontal outputs (BCM pinout)
SERVO_V_GPIO = 18
SERVO_H_GPIO = 23
'''Servo mapping for servoblaster:
0 on P1-7 GPIO-4
1 on P1-11 GPIO-17
*2 on P1-12 GPIO-18*
3 on P1-13 GPIO-27
4 on P1-15 GPIO-22
*5 on P1-16 GPIO-23*
6 on P1-18 GPIO-24
7 on P1-22 GPIO-25'''
#Servo pins
SERVO_H = '2' #pin 12 BCM 18
SERVO_V = '5' #pin 16 BCM 23
| 18.472222
| 69
| 0.566165
|
eb1fb044cf839bde93fe0d603ce5bba8c4e8cccd
| 449
|
py
|
Python
|
vks/vulkanglobals.py
|
geehalel/pyvk
|
56737ee4547b3f12bf941dcda74305b739d09cbb
|
[
"MIT"
] | 1
|
2022-01-09T19:02:00.000Z
|
2022-01-09T19:02:00.000Z
|
vks/vulkanglobals.py
|
geehalel/pyvk
|
56737ee4547b3f12bf941dcda74305b739d09cbb
|
[
"MIT"
] | null | null | null |
vks/vulkanglobals.py
|
geehalel/pyvk
|
56737ee4547b3f12bf941dcda74305b739d09cbb
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2019 by geehalel@gmail.com
# This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
import platform
_WIN32 = (platform.system() == 'Windows')
VK_USE_PLATFORM_WIN32_KHR = _WIN32
VK_USE_PLATFORM_ANDROID_KHR = False
VK_USE_PLATFORM_WAYLAND_KHR = False
_DIRECT2DISPLAY = False
#VK_USE_PLATFORM_XCB_KHR = True
VK_USE_PLATFORM_XCB_KHR = not VK_USE_PLATFORM_WIN32_KHR
DEFAULT_FENCE_TIMEOUT = 100000000000
| 29.933333
| 88
| 0.815145
|