hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a389bd7328bfeb9809c940787f3815d94a0c7bd6 | 2,783 | py | Python | commands.py | abcxyz618/MovieGeek | 06029ed4202c63d3da4e306eb5d500ab81f2e1cb | [
"MIT"
] | null | null | null | commands.py | abcxyz618/MovieGeek | 06029ed4202c63d3da4e306eb5d500ab81f2e1cb | [
"MIT"
] | null | null | null | commands.py | abcxyz618/MovieGeek | 06029ed4202c63d3da4e306eb5d500ab81f2e1cb | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from omdb_api import *
from tmdb_api import *
| 36.618421 | 102 | 0.594682 |
a38a03f634375d52713a25701814579ff7b6e33e | 92,070 | py | Python | cryptoapis/api/unified_endpoints_api.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/api/unified_endpoints_api.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/api/unified_endpoints_api.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.api_client import ApiClient, Endpoint as _Endpoint
from cryptoapis.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from cryptoapis.model.get_address_details_r import GetAddressDetailsR
from cryptoapis.model.get_block_details_by_block_hash_r import GetBlockDetailsByBlockHashR
from cryptoapis.model.get_block_details_by_block_height_r import GetBlockDetailsByBlockHeightR
from cryptoapis.model.get_fee_recommendations_r import GetFeeRecommendationsR
from cryptoapis.model.get_last_mined_block_r import GetLastMinedBlockR
from cryptoapis.model.get_transaction_details_by_transaction_idr import GetTransactionDetailsByTransactionIDR
from cryptoapis.model.inline_response400 import InlineResponse400
from cryptoapis.model.inline_response40010 import InlineResponse40010
from cryptoapis.model.inline_response40015 import InlineResponse40015
from cryptoapis.model.inline_response40016 import InlineResponse40016
from cryptoapis.model.inline_response40017 import InlineResponse40017
from cryptoapis.model.inline_response40024 import InlineResponse40024
from cryptoapis.model.inline_response40026 import InlineResponse40026
from cryptoapis.model.inline_response40030 import InlineResponse40030
from cryptoapis.model.inline_response40037 import InlineResponse40037
from cryptoapis.model.inline_response4004 import InlineResponse4004
from cryptoapis.model.inline_response40042 import InlineResponse40042
from cryptoapis.model.inline_response40053 import InlineResponse40053
from cryptoapis.model.inline_response401 import InlineResponse401
from cryptoapis.model.inline_response40110 import InlineResponse40110
from cryptoapis.model.inline_response40115 import InlineResponse40115
from cryptoapis.model.inline_response40116 import InlineResponse40116
from cryptoapis.model.inline_response40117 import InlineResponse40117
from cryptoapis.model.inline_response40124 import InlineResponse40124
from cryptoapis.model.inline_response40126 import InlineResponse40126
from cryptoapis.model.inline_response40130 import InlineResponse40130
from cryptoapis.model.inline_response40137 import InlineResponse40137
from cryptoapis.model.inline_response4014 import InlineResponse4014
from cryptoapis.model.inline_response40142 import InlineResponse40142
from cryptoapis.model.inline_response40153 import InlineResponse40153
from cryptoapis.model.inline_response402 import InlineResponse402
from cryptoapis.model.inline_response403 import InlineResponse403
from cryptoapis.model.inline_response40310 import InlineResponse40310
from cryptoapis.model.inline_response40315 import InlineResponse40315
from cryptoapis.model.inline_response40316 import InlineResponse40316
from cryptoapis.model.inline_response40317 import InlineResponse40317
from cryptoapis.model.inline_response40324 import InlineResponse40324
from cryptoapis.model.inline_response40326 import InlineResponse40326
from cryptoapis.model.inline_response40330 import InlineResponse40330
from cryptoapis.model.inline_response40337 import InlineResponse40337
from cryptoapis.model.inline_response4034 import InlineResponse4034
from cryptoapis.model.inline_response40342 import InlineResponse40342
from cryptoapis.model.inline_response40353 import InlineResponse40353
from cryptoapis.model.inline_response404 import InlineResponse404
from cryptoapis.model.inline_response4041 import InlineResponse4041
from cryptoapis.model.inline_response4042 import InlineResponse4042
from cryptoapis.model.inline_response409 import InlineResponse409
from cryptoapis.model.inline_response415 import InlineResponse415
from cryptoapis.model.inline_response422 import InlineResponse422
from cryptoapis.model.inline_response429 import InlineResponse429
from cryptoapis.model.inline_response500 import InlineResponse500
from cryptoapis.model.list_all_unconfirmed_transactions_r import ListAllUnconfirmedTransactionsR
from cryptoapis.model.list_confirmed_transactions_by_address_r import ListConfirmedTransactionsByAddressR
from cryptoapis.model.list_latest_mined_blocks_r import ListLatestMinedBlocksR
from cryptoapis.model.list_transactions_by_block_hash_r import ListTransactionsByBlockHashR
from cryptoapis.model.list_transactions_by_block_height_r import ListTransactionsByBlockHeightR
from cryptoapis.model.list_unconfirmed_transactions_by_address_r import ListUnconfirmedTransactionsByAddressR
| 42.704082 | 484 | 0.514945 |
a38b317b32dcbc6c9dff08940ace5dc60a5e39cd | 1,853 | py | Python | examples/run_ranch_baseline.py | pinjutien/DeepExplain | a80d85dcd5adc90968b6924a7ef39528170830f0 | [
"MIT"
] | null | null | null | examples/run_ranch_baseline.py | pinjutien/DeepExplain | a80d85dcd5adc90968b6924a7ef39528170830f0 | [
"MIT"
] | null | null | null | examples/run_ranch_baseline.py | pinjutien/DeepExplain | a80d85dcd5adc90968b6924a7ef39528170830f0 | [
"MIT"
] | null | null | null | """
RANdom CHoice baseline (RANCH): random image from the target class
"""
import random
import numpy as np
import tensorflow_datasets as tfds
from tqdm import tqdm
# Dataset configuration. Exactly one block is active; the mnist and svhn
# variants are kept commented out for reference:
#   mnist: output_pattern = '/home/ec2-user/gan_submission_1/mnist/mnist_v2/ranch_baselines_%d',
#          tfds_name = 'mnist', target_size = [28, 28, 1], num_class = 10, n_samples = 10000
#   svhn:  output_pattern = '/home/ec2-user/gan_submission_1/svhn/svhn_v2/ranch_baselines_%d',
#          tfds_name = 'svhn_cropped', target_size = [32, 32, 3], num_class = 10, n_samples = 26032
output_pattern = '/home/ec2-user/gan_submission_1/cifar10/cifar10_v2/ranch_baselines_%d'  # '%d' is filled with n_samples
tfds_name = 'cifar10'
target_size = [32, 32, 3]  # NOTE(review): unused in this script as shown — confirm before removing
num_class = 10
n_samples = 10000  # number of test images to build baselines for
if __name__ == '__main__':
    # Obtain train images.
    data_train = list(tfds.as_numpy(tfds.load(tfds_name, split='train')))
    # Obtain test images with target labels.
    ds_test = tfds.load(tfds_name, split='test')
    dslist = list(tfds.as_numpy(ds_test.take(n_samples)))
    # Draw a random class index in [0, num_class-2]; the shift below guarantees
    # the target class never equals the true label.
    ys_target = np.random.RandomState(seed=222).randint(num_class - 1, size=n_samples)
    xs, ys_label = [], []
    for ind, sample in enumerate(dslist):
        xs.append(sample['image'])
        ys_label.append(sample['label'])
        if ys_target[ind] >= sample['label']:
            ys_target[ind] += 1
    # Scale all images to [0, 1].
    for ind in range(len(data_train)):
        data_train[ind]['image'] = data_train[ind]['image'] / 255.0
    xs = np.array(xs)
    xs = xs / 255.0  # BUGFIX: was 255.5, inconsistent with the /255.0 applied to train images above
    ys_label = np.array(ys_label)
    # Index train samples by class label so we can sample uniformly per class.
    index_map = {i: [] for i in range(num_class)}  # was hard-coded range(10)
    for i, train_sample in enumerate(data_train):
        index_map[train_sample['label']].append(i)
    # RANCH baseline: for each test sample, pick a random train image from the
    # (wrong) target class.
    outputs = []
    for ind in tqdm(range(n_samples)):
        i = random.choice(index_map[ys_target[ind]])
        outputs.append(data_train[i]['image'])
    outputs = np.array(outputs)
    np.save(output_pattern % n_samples, outputs)
a38b4a3c4607025ed47cb0e6994bcee905fa97f0 | 359 | py | Python | pageOne.py | 3bru/qt-tkinter-Test | 41eefe7621c6a0bf3a25b4503df7a7451fc363b2 | [
"MIT"
] | 1 | 2020-05-18T21:59:39.000Z | 2020-05-18T21:59:39.000Z | pageOne.py | 3bru/qt-tkinter-Test | 41eefe7621c6a0bf3a25b4503df7a7451fc363b2 | [
"MIT"
] | null | null | null | pageOne.py | 3bru/qt-tkinter-Test | 41eefe7621c6a0bf3a25b4503df7a7451fc363b2 | [
"MIT"
] | null | null | null | import sqlite3, os
# Create the notes table (if needed), insert one sample row, and print all rows.
con = sqlite3.connect('database.sqlite')
try:
    im = con.cursor()
    tablo = """CREATE TABLE IF NOT EXISTS writes(day, topic, texti)"""
    # NOTE: this INSERT runs on every execution, so repeated runs accumulate
    # duplicate rows; add a uniqueness constraint or INSERT OR IGNORE if that
    # is not intended.
    deger = """INSERT INTO writes VALUES('oneDay', 'nmap', 'nmaple ilgili bisiler')"""
    im.execute(tablo)
    im.execute(deger)
    con.commit()
    im.execute("""SELECT * FROM writes""")
    veriler = im.fetchall()
    print(veriler)
finally:
    con.close()  # fix: the connection was never closed
| 22.4375 | 82 | 0.696379 |
a38b9d380fbd10ce2b7350457ab818a75b222fac | 6,075 | py | Python | basicsr/metrics/psnr_ssim.py | BCV-Uniandes/RSR | dad60eedd3560f2655e3d1ed444153ed2616af2e | [
"zlib-acknowledgement"
] | 14 | 2021-08-28T04:15:37.000Z | 2021-12-28T17:00:33.000Z | basicsr/metrics/psnr_ssim.py | BCV-Uniandes/RSR | dad60eedd3560f2655e3d1ed444153ed2616af2e | [
"zlib-acknowledgement"
] | 2 | 2021-09-26T01:27:06.000Z | 2021-12-24T19:06:09.000Z | basicsr/metrics/psnr_ssim.py | BCV-Uniandes/RSR | dad60eedd3560f2655e3d1ed444153ed2616af2e | [
"zlib-acknowledgement"
] | 1 | 2021-10-18T15:48:56.000Z | 2021-10-18T15:48:56.000Z | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
def calculate_psnr(img1,
                   img2,
                   crop_border,
                   input_order='HWC',
                   test_y_channel=False):
    """Calculate PSNR (Peak Signal-to-Noise Ratio).

    Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the PSNR calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: psnr result (``inf`` for identical images).
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')  # fix: typo "differnet"
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    # Normalize layout to HWC so the border crop below indexes (H, W).
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)

    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]

    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)

    mse = np.mean((img1 - img2)**2)
    if mse == 0:
        # Identical images: PSNR is unbounded.
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse))
def _ssim(img1, img2):
    """Calculate SSIM (structural similarity) for one channel images.

    It is called by func:`calculate_ssim`.

    Args:
        img1 (ndarray): Images with range [0, 255] with order 'HWC'.
        img2 (ndarray): Images with range [0, 255] with order 'HWC'.

    Returns:
        float: ssim result.
    """
    # Stabilizing constants (K1=0.01, K2=0.03) scaled for an 8-bit dynamic range.
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    # 11x11 Gaussian window with sigma=1.5, built as an outer product of 1-D kernels.
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    # Local means; the [5:-5, 5:-5] crop discards the 5-pixel border affected
    # by the filter's boundary handling.
    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # Local variances/covariance via E[x^2] - (E[x])^2 on the same window.
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    # Per-pixel SSIM map, averaged into a single score.
    ssim_map = ((2 * mu1_mu2 + C1) *
                (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                       (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
def calculate_ssim(img1,
                   img2,
                   crop_border,
                   input_order='HWC',
                   test_y_channel=False):
    """Calculate SSIM (structural similarity).

    Ref:
    Image quality assessment: From error visibility to structural similarity

    The results are the same as that of the official released MATLAB code in
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.

    For three-channel images, SSIM is calculated for each channel and then
    averaged.

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the SSIM calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: ssim result.
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')  # fix: typo "differnet"
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    # Normalize layout to HWC so the border crop below indexes (H, W).
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)

    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]

    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)

    # SSIM per channel, averaged across channels.
    ssims = []
    for i in range(img1.shape[2]):
        ssims.append(_ssim(img1[..., i], img2[..., i]))
    return np.array(ssims).mean()
import torch
import torch.nn as nn
import lpips
import torchvision
import numpy
# from misc.kernel_loss import shave_a2b | 33.379121 | 80 | 0.596708 |
a38ee10bfa692aa23805d2d2b99b5f0481e7ce48 | 14,224 | py | Python | data/dataset.py | limingwu8/Pneumonia-Detection | 8541e0f34a72f6e94773bf234cfd071732229b2b | [
"MIT"
] | 7 | 2019-01-27T02:30:56.000Z | 2020-04-29T18:47:21.000Z | data/dataset.py | limingwu8/Pneumonia-Detection | 8541e0f34a72f6e94773bf234cfd071732229b2b | [
"MIT"
] | 1 | 2020-01-28T04:40:15.000Z | 2020-05-01T02:37:40.000Z | data/dataset.py | limingwu8/Pneumonia-Detection | 8541e0f34a72f6e94773bf234cfd071732229b2b | [
"MIT"
] | 3 | 2019-08-09T09:16:00.000Z | 2021-07-01T11:45:00.000Z | import os
import numpy as np
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from skimage import io, transform
from utils.Config import opt
from skimage import exposure
import matplotlib.pylab as plt
from utils import array_tool as at
from sklearn.model_selection import train_test_split
from data.data_utils import read_image, resize_bbox, flip_bbox, random_flip, flip_masks
from utils.vis_tool import apply_mask_bbox
import matplotlib.patches as patches
# Detection class label names; a single class "p" (pneumonia).
DSB_BBOX_LABEL_NAMES = ('p',)  # fix: ('p') was a plain string, not the intended 1-tuple
"""Transforms:
Data augmentation
"""
def preprocess(img, min_size=600, max_size=1000, train=True):
    """Preprocess an image for feature extraction.

    The shorter edge is scaled toward :obj:`min_size`; if that would make the
    longer edge exceed :obj:`max_size`, the image is scaled so the longer edge
    fits :obj:`max_size` instead. The resized image is then normalized in
    Caffe or PyTorch style depending on ``opt.caffe_pretrain``.

    Args:
        img (~numpy.ndarray): An image in CHW and RGB format.
            The range of its value is :math:`[0, 255]`.
        min_size (int): Target length of the shorter edge. Default: 600.
        max_size (int): Upper bound on the longer edge. Default: 1000.
        train (bool): NOTE(review): unused in this function as shown — confirm
            whether it can be removed.

    Returns:
        ~numpy.ndarray: A preprocessed (resized + normalized) CHW image.
    """
    C, H, W = img.shape
    scale1 = min_size / min(H, W)
    scale2 = max_size / max(H, W)
    # Use the smaller scale so both the min_size and max_size constraints hold.
    scale = min(scale1, scale2)

    # Pick the normalization matching the pretrained backbone weights.
    if opt.caffe_pretrain:
        normalize = caffe_normalize
    else:
        normalize = pytorch_normalze

    if opt.hist_equalize:
        # equalize_hist returns values in [0, 1], so the /255 step used on the
        # default branch below is not applied here.
        hist_img = exposure.equalize_hist(img)
        hist_img = transform.resize(hist_img, (C, H * scale, W * scale), mode='reflect')
        hist_img = normalize(hist_img)
        return hist_img

    img = img / 255.
    img = transform.resize(img, (C, H * scale, W * scale), mode='reflect')
    # both the longer and shorter should be less than
    # max_size and min_size
    img = normalize(img)
    return img
def pytorch_normalze(img):
    """Normalize a CHW float image with ImageNet channel statistics.

    Returns an RGB array with values roughly in [-1, 1], matching
    torchvision's pretrained-model preprocessing.
    """
    # ImageNet per-channel mean and standard deviation.
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    tensor = torch.from_numpy(img)
    normalized = transforms.Normalize(mean=imagenet_mean, std=imagenet_std)(tensor)
    return normalized.numpy()
def caffe_normalize(img):
    """Caffe-style normalization for a CHW RGB image in [0, 1].

    Reorders channels RGB -> BGR, rescales to [0, 255], then subtracts the
    per-channel BGR mean. Returns approximately [-125, 125] BGR as float32.
    """
    # RGB -> BGR, then back to the 0-255 range expected by Caffe weights.
    bgr = img[[2, 1, 0], :, :] * 255
    # Per-channel (B, G, R) mean, broadcast over H and W.
    bgr_mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)
    return (bgr - bgr_mean).astype(np.float32, copy=True)
def get_train_loader(root_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):
    """Build a DataLoader over every image in ``root_dir`` (training mode).

    :param root_dir: root directory of the data set; one entry per image id
    :param batch_size: batch size. Default: 16
    :param shuffle: whether to shuffle samples each epoch. Default: False
    :param num_workers: worker processes for loading. Default: 4
    :param pin_memory: pin batches in page-locked memory (useful with CUDA)
    :return: DataLoader over the whole directory
    """
    image_ids = sorted(os.listdir(root_dir))
    dataset = RSNADataset(root_dir=root_dir, img_id=image_ids, transform=True, train=True)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                      num_workers=num_workers, pin_memory=pin_memory)
def get_train_val_loader(root_dir, batch_size=16, val_ratio=0.2, shuffle=False, num_workers=4, pin_memory=False):
    """Build training and validation DataLoaders from one image directory.

    The image ids are split with a fixed random_state (55) so the split is
    reproducible across runs.

    :param root_dir: root directory of the data set
    :param batch_size: batch size for both loaders. Default: 16
    :param val_ratio: fraction of images assigned to validation. Default: 0.2
    :param shuffle: whether to shuffle (applied to both the split and loaders)
    :param num_workers: worker processes for loading. Default: 4
    :param pin_memory: pin batches in page-locked memory (useful with CUDA)
    :return: (train_loader, val_loader)
    """
    image_ids = sorted(os.listdir(root_dir))
    train_ids, val_ids = train_test_split(image_ids, test_size=val_ratio, random_state=55, shuffle=shuffle)

    def _make_loader(ids):
        # Both splits use the same training-mode dataset wrapper.
        dataset = RSNADataset(root_dir=root_dir, img_id=ids, transform=True, train=True)
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                          num_workers=num_workers, pin_memory=pin_memory)

    return _make_loader(train_ids), _make_loader(val_ids)
def get_test_loader(test_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):
    """Build a DataLoader over the test set directory.

    :param test_dir: root directory of the test set
    :param batch_size: batch size. Default: 16
    :param shuffle: whether to shuffle samples. Default: False
    :param num_workers: worker processes for loading. Default: 4
    :param pin_memory: pin batches in page-locked memory (useful with CUDA)
    :return: DataLoader over the test set
    """
    return DataLoader(RSNADatasetTest(root_dir=test_dir),
                      batch_size=batch_size, shuffle=shuffle,
                      num_workers=num_workers, pin_memory=pin_memory)
def show_batch_train(sample_batched):
    """
    Visualize one training image and its corresponding bbox.
    Blocks until the matplotlib window is closed (plt.show()).
    """
    # Presumably 5 keys identifies a training batch (with bbox info) as opposed
    # to a test batch — confirm against the dataset classes.
    if len(sample_batched.keys())==5:
        img_id, image, bbox = sample_batched['img_id'], sample_batched['image'], sample_batched['bbox']
        # Undo training-time normalization so pixel values are displayable.
        # NOTE(review): `inverse_normalize` is not defined/imported in this file
        # as shown — confirm where it comes from.
        orig_img = at.tonumpy(image)
        orig_img = inverse_normalize(orig_img)
        # Only the boxes of the first sample in the batch are drawn.
        bbox = bbox[0, :]
        ax = plt.subplot(111)
        ax.imshow(np.transpose(np.squeeze(orig_img / 255.), (1, 2, 0)))
        ax.set_title(img_id[0])
        for i in range(bbox.shape[0]):
            # Boxes are stored as (y1, x1, y2, x2).
            y1, x1, y2, x2 = int(bbox[i][0]), int(bbox[i][1]), int(bbox[i][2]), int(bbox[i][3])
            h = y2 - y1
            w = x2 - x1
            rect = patches.Rectangle((x1, y1), w, h, linewidth=1, edgecolor='r', facecolor='none')
            ax.add_patch(rect)
        plt.show()
if __name__ == '__main__':
    # Smoke test: build train+validation loaders and visualize every training
    # batch with its bounding boxes. (Commented-out variants for a single
    # train loader, a test loader, and DenseNet class-balance counting were
    # removed for clarity.)
    # Load training & validation set
    train_loader, val_loader = get_train_val_loader(opt.root_dir, batch_size=opt.batch_size, val_ratio=0.1,
                                                    shuffle=True, num_workers=opt.num_workers,
                                                    pin_memory=opt.pin_memory)
    for i_batch, sample in enumerate(train_loader):
        show_batch_train(sample)
a38f9c51d087930a15e07db3d41e43fedee278f9 | 8,344 | py | Python | make_dataset/kor_sample_dataset.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 67 | 2021-05-12T15:54:28.000Z | 2022-03-12T15:55:35.000Z | make_dataset/kor_sample_dataset.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 71 | 2021-05-01T06:07:37.000Z | 2022-01-28T16:54:46.000Z | make_dataset/kor_sample_dataset.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 14 | 2021-05-24T10:57:27.000Z | 2022-02-18T06:34:11.000Z | import json
import os.path as p
from collections import defaultdict
import pandas as pd
from datasets import load_dataset
from datasets import concatenate_datasets
from datasets import Sequence, Value, Features, Dataset, DatasetDict
from utils.tools import get_args
# HuggingFace `datasets` feature schema for SQuAD-style examples:
# `answers` holds parallel lists of answer text and character-level
# answer_start offsets; the remaining fields are plain strings.
f = Features(
    {
        "answers": Sequence(
            feature={"text": Value(dtype="string", id=None), "answer_start": Value(dtype="int32", id=None)},
            length=-1,
            id=None,
        ),
        "id": Value(dtype="string", id=None),
        "context": Value(dtype="string", id=None),
        "question": Value(dtype="string", id=None),
        "title": Value(dtype="string", id=None),
    }
)
def make_kor_dataset_v1(args):
    """Build KorQuAD sample dataset V1 and save it to disk.

    Pipeline:
        1. Drop examples whose context exceeds 512 characters.
        2. Drop contexts that appear with more than 4 questions.
        3. Sample 8000 examples weighted by answer_start position.

    Raises:
        FileExistsError: if the output directory already exists.

    The filtering/sampling helpers are defined elsewhere in the project
    (not visible in this file).
    """
    kor_dataset_path = p.join(args.path.train_data_dir, "kor_dataset")
    if p.exists(kor_dataset_path):
        raise FileExistsError(f"{kor_dataset_path} !")
    # Merge the KorQuAD train and validation splits into one pool.
    kor_dataset = load_dataset("squad_kor_v1")
    kor_dataset = concatenate_datasets(
        [kor_dataset["train"].flatten_indices(), kor_dataset["validation"].flatten_indices()]
    )
    # (1) Filter by document length (matches the KLUE MRC 512 limit).
    kor_dataset = filtering_by_doc_len(kor_dataset, doc_len=512)
    # (2) Filter contexts duplicated across more than 4 questions.
    kor_dataset = filtering_by_dup_question(kor_dataset, dup_limit=4)
    # (3) Sample by answer_start weights.
    kor_dataset = sampling_by_ans_start_weights(kor_dataset, sample=8000)
    # (4) Persist as a DatasetDict with a single "train" split.
    kor_datasets = DatasetDict({"train": kor_dataset})
    kor_datasets.save_to_disk(kor_dataset_path)
    print(f"{kor_dataset_path} !")
def make_kor_dataset_v2(args):
    """Build KorQuAD sample dataset V2 and save it to disk.

    Pipeline:
        1. Drop examples whose context exceeds 512 characters.
        2. Drop contexts that appear with more than 4 questions.
        3. Resample weighted by answer_start position.
        4. Sample 4000 examples weighted by document length.

    Raises:
        FileExistsError: if the output directory already exists.

    The filtering/sampling helpers are defined elsewhere in the project
    (not visible in this file).
    """
    kor_dataset_path = p.join(args.path.train_data_dir, "kor_dataset_v2")
    if p.exists(kor_dataset_path):
        raise FileExistsError(f"{kor_dataset_path} !")
    # Merge the KorQuAD train and validation splits into one pool.
    kor_dataset = load_dataset("squad_kor_v1")
    kor_dataset = concatenate_datasets(
        [kor_dataset["train"].flatten_indices(), kor_dataset["validation"].flatten_indices()]
    )
    # (1) Filter by document length (matches the KLUE MRC 512 limit).
    kor_dataset = filtering_by_doc_len(kor_dataset, doc_len=512)
    # (2) Filter contexts duplicated across more than 4 questions.
    kor_dataset = filtering_by_dup_question(kor_dataset, dup_limit=4)
    # (3) Resample by answer_start weights (no explicit sample size here).
    kor_dataset = sampling_by_ans_start_weights(kor_dataset)
    # (4) Sample 4000 examples weighted by document length.
    kor_dataset = sampling_by_doc_lens(kor_dataset, sample=4000)
    # (5) Persist as a DatasetDict with a single "train" split.
    kor_datasets = DatasetDict({"train": kor_dataset})
    kor_datasets.save_to_disk(kor_dataset_path)
    print(f"{kor_dataset_path} !")
def make_etr_dataset_v1(args):
    """Build the ETRI sample dataset V1 and save it to disk.

    Pipeline (same shape as :func:`make_kor_dataset_v1`):
        1. Drop examples whose context exceeds 512 characters.
        2. Drop contexts that appear with more than 4 questions.
        3. Sample 3000 examples weighted by answer_start position.

    Raises:
        FileExistsError: if the output directory already exists.

    `get_etr_dataset` and the filtering/sampling helpers are defined
    elsewhere in the project (not visible in this file).
    """
    etr_dataset_path = p.join(args.path.train_data_dir, "etr_dataset_v1")
    if p.exists(etr_dataset_path):
        raise FileExistsError(f"{etr_dataset_path} !")
    etr_dataset = get_etr_dataset(args)
    # (1) Filter by document length (matches the KLUE MRC 512 limit).
    etr_dataset = filtering_by_doc_len(etr_dataset, doc_len=512)
    # (2) Filter contexts duplicated across more than 4 questions.
    etr_dataset = filtering_by_dup_question(etr_dataset, dup_limit=4)
    # (3) Sample 3000 examples by answer_start weights.
    etr_dataset = sampling_by_ans_start_weights(etr_dataset, sample=3000)
    # (4) Persist as a DatasetDict with a single "train" split.
    etr_datasets = DatasetDict({"train": etr_dataset})
    etr_datasets.save_to_disk(etr_dataset_path)
    print(f"{etr_dataset_path} !")
if __name__ == "__main__":
    args = get_args()
    # NOTE(review): `main` is not defined in this file as shown — presumably it
    # lives elsewhere or should dispatch to one of the make_*_dataset
    # functions above; confirm before running.
    main(args)
| 28.772414 | 112 | 0.65604 |
a3927c6d9fb19dc907aa3851f9fb6293c833eaf2 | 1,737 | py | Python | tests/test_simple.py | teosavv/pyembroidery | 00985f423e64ea1a454e5484012c19a64f26eb2c | [
"MIT"
] | 45 | 2018-07-08T09:49:30.000Z | 2022-03-23T07:01:15.000Z | tests/test_simple.py | teosavv/pyembroidery | 00985f423e64ea1a454e5484012c19a64f26eb2c | [
"MIT"
] | 59 | 2018-07-05T22:05:58.000Z | 2022-02-20T01:01:20.000Z | tests/test_simple.py | teosavv/pyembroidery | 00985f423e64ea1a454e5484012c19a64f26eb2c | [
"MIT"
] | 23 | 2018-08-10T17:58:04.000Z | 2022-03-29T03:41:46.000Z | import os
import shutil
import pyembroidery
import test_fractals
| 31.581818 | 84 | 0.651698 |
a392dab4e0208bcba731af6d1b6b1dd6d3c0e78a | 21,317 | py | Python | train.py | eapache/HawkEars | 3b979166ed09de9f9254b830bb57499e1da7a015 | [
"MIT"
] | null | null | null | train.py | eapache/HawkEars | 3b979166ed09de9f9254b830bb57499e1da7a015 | [
"MIT"
] | 1 | 2021-12-17T16:56:12.000Z | 2021-12-19T15:53:55.000Z | train.py | eapache/HawkEars | 3b979166ed09de9f9254b830bb57499e1da7a015 | [
"MIT"
] | 1 | 2021-12-17T16:59:04.000Z | 2021-12-17T16:59:04.000Z | # Train the selected neural network model on spectrograms for birds and a few other classes.
# Train the selected neural network model on spectrograms for birds and a few other classes.
# To see command-line arguments, run the script with -h argument.
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import shutil
import sys
import time
import zlib
from collections import namedtuple
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # 1 = no info, 2 = no warnings, 3 = no errors
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
from tensorflow import keras
from core import audio
from core import constants
from core import data_generator
from core import database
from core import plot
from core import util
from model import model_checkpoint
from model import efficientnet_v2
# learning rate schedule with cosine decay
def cos_lr_schedule(epoch):
    """Cosine-decay learning-rate schedule, scaled linearly by batch size (base 64)."""
    global trainer
    params = trainer.parameters
    scaled_base = params.base_lr * params.batch_size / 64
    # Half-cosine from scaled_base down to 0 across the configured epoch count.
    decay = (1 + math.cos(epoch * math.pi / max(params.epochs, 1))) / 2
    if params.verbosity == 0:
        # At verbosity 0 nothing else is printed, so emit minimal progress info.
        print(f'epoch: {epoch + 1} / {params.epochs}')
    return scaled_base * decay
if __name__ == '__main__':
    # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', type=int, default=32, help='Batch size. Default = 32.')
    parser.add_argument('-c', type=int, default=15, help='Minimum epochs before saving checkpoint. Default = 15.')
    parser.add_argument('-d', type=float, default=0.0, help='Minimum validation accuracy before saving checkpoint. Default = 0.')
    parser.add_argument('-e', type=int, default=10, help='Number of epochs. Default = 10.')
    parser.add_argument('-f', type=str, default='training', help='Name of training database. Default = training.')
    parser.add_argument('-g', type=int, default=1, help='If 1, make a separate copy of each saved checkpoint. Default = 1.')
    parser.add_argument('-j', type=int, default=0, help='If 1, save checkpoint only when val accuracy improves. Default = 0.')
    parser.add_argument('-m', type=int, default=1, help='Model type (0 = Load existing model, 1 = EfficientNetV2. Default = 1.')
    parser.add_argument('-m2', type=str, default='a0', help='Name of EfficientNetV2 configuration to use. Default = "a0". ')
    parser.add_argument('-r', type=float, default=.006, help='Base learning rate. Default = .006')
    parser.add_argument('-t', type=float, default=.01, help='Test portion. Default = .01')
    parser.add_argument('-u', type=int, default=0, help='1 = Train a multi-label classifier. Default = 0.')
    parser.add_argument('-v', type=int, default=1, help='Verbosity (0-2, 0 omits output graphs, 2 plots misidentified test spectrograms, 3 adds graph of model). Default = 1.')
    parser.add_argument('-x', type=str, default='', help='Name(s) of extra validation databases. "abc" means load "abc.db". "abc,def" means load both databases for validation. Default = "". ')
    parser.add_argument('-y', type=int, default=0, help='If y = 1, extract spectrograms for binary classifier. Default = 0.')
    parser.add_argument('-z', type=int, default=None, help='Integer seed for random number generators. Default = None (do not). If specified, other settings to increase repeatability will also be enabled, which slows down training.')
    args = parser.parse_args()

    # Bundle all hyper-parameters into one immutable record passed to Trainer.
    Parameters = namedtuple('Parameters', ['base_lr', 'batch_size', 'binary_classifier', 'ckpt_min_epochs', 'ckpt_min_val_accuracy',
        'copy_ckpt', 'eff_config', 'epochs', 'multilabel', 'save_best_only', 'seed', 'test_portion', 'training', 'type',
        'val_db', 'verbosity'])
    parameters = Parameters(base_lr=args.r, batch_size = args.b, binary_classifier=(args.y==1), ckpt_min_epochs=args.c, ckpt_min_val_accuracy=args.d,
        copy_ckpt=(args.g == 1), eff_config = args.m2, epochs = args.e, multilabel=(args.u==1), save_best_only=(args.j == 1), seed=args.z,
        test_portion = args.t, training=args.f, type = args.m, val_db = args.x, verbosity = args.v)

    # NOTE(review): `args.z != None` should idiomatically be `args.z is not None`.
    if args.z != None:
        # these settings make results more reproducible, which is very useful when tuning parameters
        os.environ['PYTHONHASHSEED'] = str(args.z)
        #os.environ['TF_DETERMINISTIC_OPS'] = '1'
        os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
        random.seed(args.z)
        np.random.seed(args.z)
        tf.random.set_seed(args.z)
        tf.config.threading.set_inter_op_parallelism_threads(1)
        tf.config.threading.set_intra_op_parallelism_threads(1)

    keras.mixed_precision.set_global_policy("mixed_float16") # trains 25-30% faster
    # Trainer is defined elsewhere in this file (not visible in this chunk).
    trainer = Trainer(parameters)
    trainer.run()
| 47.476615 | 233 | 0.598208 |
a3943fc348baced6fa934c762ac87be734e9ae13 | 2,002 | py | Python | limix/heritability/estimate.py | fpcasale/limix | a6bc2850f243fe779991bb53a24ddbebe0ab74d2 | [
"Apache-2.0"
] | null | null | null | limix/heritability/estimate.py | fpcasale/limix | a6bc2850f243fe779991bb53a24ddbebe0ab74d2 | [
"Apache-2.0"
] | null | null | null | limix/heritability/estimate.py | fpcasale/limix | a6bc2850f243fe779991bb53a24ddbebe0ab74d2 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from numpy import ascontiguousarray, copy, ones, var
from numpy_sugar.linalg import economic_qs
from glimix_core.glmm import GLMMExpFam
def estimate(pheno, lik, K, covs=None, verbose=True):
r"""Estimate the so-called narrow-sense heritability.
It supports Normal, Bernoulli, Binomial, and Poisson phenotypes.
Let :math:`N` be the sample size and :math:`S` the number of covariates.
Parameters
----------
pheno : tuple, array_like
Phenotype. Dimensions :math:`N\\times 0`.
lik : {'normal', 'bernoulli', 'binomial', 'poisson'}
Likelihood name.
K : array_like
Kinship matrix. Dimensions :math:`N\\times N`.
covs : array_like
Covariates. Default is an offset. Dimensions :math:`N\\times S`.
Returns
-------
float
Estimated heritability.
Examples
--------
.. doctest::
>>> from numpy import dot, exp, sqrt
>>> from numpy.random import RandomState
>>> from limix.heritability import estimate
>>>
>>> random = RandomState(0)
>>>
>>> G = random.randn(50, 100)
>>> K = dot(G, G.T)
>>> z = dot(G, random.randn(100)) / sqrt(100)
>>> y = random.poisson(exp(z))
>>>
>>> print('%.2f' % estimate(y, 'poisson', K, verbose=False))
0.70
"""
K = _background_standardize(K)
QS = economic_qs(K)
lik = lik.lower()
if lik == "binomial":
p = len(pheno[0])
else:
p = len(pheno)
if covs is None:
covs = ones((p, 1))
glmm = GLMMExpFam(pheno, lik, covs, QS)
glmm.feed().maximize(verbose=verbose)
g = glmm.scale * (1 - glmm.delta)
e = glmm.scale * glmm.delta
h2 = g / (var(glmm.mean()) + g + e)
return h2
| 24.414634 | 76 | 0.580919 |
a394632989f95d229e000f46db6a73bbdcda0cf3 | 2,739 | py | Python | pyrat/__main__.py | gitmarek/pyrat | cbf918d5c23d5d39e62e00bb64b6d0596170c68b | [
"MIT"
] | null | null | null | pyrat/__main__.py | gitmarek/pyrat | cbf918d5c23d5d39e62e00bb64b6d0596170c68b | [
"MIT"
] | null | null | null | pyrat/__main__.py | gitmarek/pyrat | cbf918d5c23d5d39e62e00bb64b6d0596170c68b | [
"MIT"
] | null | null | null | import argparse, importlib, sys
import pyrat
from pyrat import name, version, logger
# This returns a function to be called by a subparser below
# We assume in the tool's submodule there's a function called 'start(args)'
# That takes over the execution of the program.
if __name__ == '__main__':
    # Top-level parser: global flags plus one subcommand per tool.
    parser = argparse.ArgumentParser(prog=name,
            description='Raw tools for raw audio.',
            epilog= name+' <command> -h for more details.')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--quiet', action='store_true',
            help='takes precedence over \'verbose\'')
    parser.add_argument('-v', '--version', action='store_true',
            help='print version number and exit')
    subparsers = parser.add_subparsers(title="Commands")

    # Parser for the "conv" command.
    parser_conv = subparsers.add_parser('conv',
            description='''Convolve input signal with kernel.
Normalize the result and write it to outfile.''',
            help='Convolve input with a kernel.')
    parser_conv.add_argument('infile', type=argparse.FileType('r'))
    parser_conv.add_argument('kerfile', type=argparse.FileType('r'),
            help="kernel to be convolved with infile")
    parser_conv.add_argument('outfile', type=argparse.FileType('w'))
    parser_conv.set_defaults(func=tool_('conv'))

    # Parser for the "randph" command.
    parser_randph = subparsers.add_parser('randph',
            description='''Randomize phases of Fourier coefficients.
Calculate the FFT of the entire signal; then randomize the phases of each
frequency bin by multiplying the frequency coefficient by a random phase:
e^{2pi \phi}, where $\phi$ is distributed uniformly on the interval [0,b). By
default, b=0.1. The result is saved to outfile.''',
            help='Randomize phases of Fourier coefficients.')
    parser_randph.add_argument('infile', type=argparse.FileType('r'))
    parser_randph.add_argument('outfile', type=argparse.FileType('w'))
    parser_randph.add_argument('-b', type=float, default=0.1,
            # BUG FIX: help text previously read "disttibuted"
            help='phases distributed uniformly on [0,b)')
    parser_randph.set_defaults(func=tool_('randph'))

    # No arguments at all: show usage instead of failing later.
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)

    args = parser.parse_args()

    if args.version:
        print(name + '-' + version)
        sys.exit(0)

    # Logging verbosity: quiet wins over verbose.
    if args.verbose:
        logger.setLevel('INFO')
    else:
        logger.setLevel('WARNING')
    if args.quiet:
        logger.setLevel(60) # above 'CRITICAL'

    # Dispatch to the selected tool's start(args) wrapper.
    args.func(args)
    sys.exit(0)
| 36.039474 | 78 | 0.683826 |
a394774a260348220f0663c39347cf191a6da686 | 485 | py | Python | zof/event.py | byllyfish/pylibofp | 8e96caf83f57cab930b45a78eb4a8eaa6d9d0408 | [
"MIT"
] | 4 | 2017-09-20T19:10:51.000Z | 2022-01-10T04:02:00.000Z | zof/event.py | byllyfish/pylibofp | 8e96caf83f57cab930b45a78eb4a8eaa6d9d0408 | [
"MIT"
] | 2 | 2017-09-02T22:53:03.000Z | 2018-01-01T03:27:48.000Z | zof/event.py | byllyfish/pylibofp | 8e96caf83f57cab930b45a78eb4a8eaa6d9d0408 | [
"MIT"
] | null | null | null | from .objectview import to_json, from_json
| 26.944444 | 74 | 0.610309 |
a3955ee346d7a3a5338cd528fa6afbec24d5527c | 2,007 | py | Python | python/projecteuler/src/longest_collatz_sequence.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | python/projecteuler/src/longest_collatz_sequence.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | 2 | 2022-03-10T03:49:14.000Z | 2022-03-14T00:49:54.000Z | python/projecteuler/src/longest_collatz_sequence.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Longest Collatz sequence.
The following iterative sequence is defined
for the set of positive integers:
n n/2 (n is even)
n 3n + 1 (n is odd)
Using the rule above and starting with 13,
we generate the following sequence:
13 40 20 10 5 16 8 4 2 1
It can be seen that this sequence (starting at 13 and finishing at 1)
contains 10 terms. Although it has not been proved yet
(Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are
allowed to go above one million.
source: https://projecteuler.net/problem=14
"""
CACHE = {1: [1]}  # memo: n -> full Collatz sequence starting at n
CACHE_LENGTH = {1: 1}  # memo: n -> length of the Collatz sequence starting at n


def collatz_sequence(n) -> list:
    """Return the full Collatz sequence starting at ``n``.

    Every suffix discovered along the way is memoised in ``CACHE``.
    Implemented iteratively so arbitrarily long chains cannot hit Python's
    recursion limit (the original recursive version could); also fixes the
    return annotation, which wrongly said ``int`` for a list result.

    :param n: positive starting integer.
    :return: list of the sequence terms from ``n`` down to 1.
    """
    # Walk forward until we reach a value whose sequence is already cached.
    pending = []
    current = n
    while current not in CACHE:
        pending.append(current)
        current = current // 2 if current % 2 == 0 else 3 * current + 1
    # Unwind: build and cache the sequence for every value we passed.
    tail = CACHE[current]
    for value in reversed(pending):
        tail = [value] + tail
        CACHE[value] = tail
    return CACHE[n]
def longest_collatz_sequence(limit: int) -> int:
    """Return the starting number <= ``limit`` with the longest Collatz chain.

    The previous implementation took ``max`` over every key in
    ``CACHE_LENGTH``, which also contains intermediate chain values above
    ``limit``; here only valid starting numbers in [1, limit] are considered.

    :param limit: inclusive upper bound for starting numbers.
    :return: number that generates the longest collatz sequence.
    """
    if limit < 1:
        # Matches the old behaviour, which fell back to the seed entry {1: 1}.
        return 1
    return max(range(1, limit + 1), key=collatz_sequence_length)
def collatz_sequence_length(n):
    """Return the length of the Collatz sequence starting at ``n``.

    Lengths are memoised in ``CACHE_LENGTH``. Implemented iteratively so
    very long chains cannot raise RecursionError (the original recursed
    once per term).

    :param n: positive starting integer.
    :return: number of terms in the sequence, including ``n`` and 1.
    """
    # Walk forward until we hit a value whose length is already known.
    pending = []
    current = n
    while current not in CACHE_LENGTH:
        pending.append(current)
        current = current // 2 if current % 2 == 0 else 3 * current + 1
    # Unwind, caching the length of every value we passed through.
    length = CACHE_LENGTH[current]
    for value in reversed(pending):
        length += 1
        CACHE_LENGTH[value] = length
    return CACHE_LENGTH[n]
def main() -> int:
    """Solve Project Euler problem 14.

    :return: the starting number under 1,000,000 that produces the longest
        Collatz sequence.
    """
    return longest_collatz_sequence(1000000)
if __name__ == "__main__":
    # Print the winning start number, its chain length, and the full chain.
    lcs = main()
    print(lcs, CACHE_LENGTH[lcs])
    print(" ".join(map(str, collatz_sequence(lcs))))
a396aa841a074ff27cad63b9fc597eb1d7fa8b7c | 1,823 | py | Python | examples/classify_pose.py | scottamain/aiy-maker-kit | 4cdb973067b83d27cf0601c811d887877d1bc253 | [
"Apache-2.0"
] | null | null | null | examples/classify_pose.py | scottamain/aiy-maker-kit | 4cdb973067b83d27cf0601c811d887877d1bc253 | [
"Apache-2.0"
] | null | null | null | examples/classify_pose.py | scottamain/aiy-maker-kit | 4cdb973067b83d27cf0601c811d887877d1bc253 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performs pose classification using the MoveNet model.
The MoveNet model identifies the body keypoints on a person, and then this
code passes those keypoints to a custom-trained pose classifier model that
classifies the pose with a label, such as the name of a yoga pose.
You must first complete the Google Colab to train the pose classification model:
https://g.co/coral/train-poses
And save the output .tflite and .txt files into the examples/models/ directory.
Then just run this script:
python3 classify_pose.py
For more instructions, see g.co/aiy/maker
"""
from aiymakerkit import vision
from pycoral.utils.dataset import read_label_file
import models
# Custom pose-classification model and its label map (produced by the
# training Colab referenced in the module docstring: g.co/coral/train-poses).
MOVENET_CLASSIFY_MODEL = 'models/pose_classifier.tflite'
MOVENET_CLASSIFY_LABELS = 'models/pose_labels.txt'

# MoveNet finds body keypoints; the classifier maps keypoints to a pose label.
pose_detector = vision.PoseDetector(models.MOVENET_MODEL)
pose_classifier = vision.PoseClassifier(MOVENET_CLASSIFY_MODEL)
labels = read_label_file(MOVENET_CLASSIFY_LABELS)

# Main camera loop: annotate each frame in place.
for frame in vision.get_frames():
    # Detect the body keypoints and draw the skeleton
    pose = pose_detector.get_pose(frame)
    vision.draw_pose(frame, pose)
    # Classify the pose and overlay its label on the frame
    label_id = pose_classifier.get_class(pose)
    vision.draw_label(frame, labels.get(label_id))
| 35.745098 | 80 | 0.785518 |
a396f80d3df39bc129b954b6343810b69c00e0ea | 291 | py | Python | weldx/tags/measurement/source.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | 13 | 2020-02-20T07:45:02.000Z | 2021-12-10T13:15:47.000Z | weldx/tags/measurement/source.py | BAMWelDX/weldx | ada4e67fa00cdb80a0b954057f4e685b846c9fe5 | [
"BSD-3-Clause"
] | 675 | 2020-02-20T07:47:00.000Z | 2022-03-31T15:17:19.000Z | weldx/tags/measurement/source.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | 5 | 2020-09-02T07:19:17.000Z | 2021-12-05T08:57:50.000Z | from weldx.asdf.util import dataclass_serialization_class
from weldx.measurement import SignalSource
__all__ = ["SignalSource", "SignalSourceConverter"]
SignalSourceConverter = dataclass_serialization_class(
class_type=SignalSource, class_name="measurement/source", version="0.1.0"
)
| 29.1 | 77 | 0.821306 |
a39715724a34e51cf7b15a4f030411898b87a5ec | 1,706 | py | Python | test/test_entities_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | null | null | null | test/test_entities_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | 1 | 2022-02-24T11:21:49.000Z | 2022-02-24T11:21:49.000Z | test/test_entities_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | null | null | null | """
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import unittest
import graphsense
from graphsense.api.entities_api import EntitiesApi # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 21.871795 | 73 | 0.623681 |
a39afee8e197b6834391bc0d4c2a7ba0f29e4cdf | 622 | py | Python | tests/test_versions_in_sync.py | simon-graham/pure_interface | da7bf05151c1c906c753987fbf7e3251905b4ba0 | [
"MIT"
] | 10 | 2018-08-27T04:15:53.000Z | 2021-08-18T09:45:35.000Z | tests/test_versions_in_sync.py | simon-graham/pure_interface | da7bf05151c1c906c753987fbf7e3251905b4ba0 | [
"MIT"
] | 35 | 2018-08-27T04:17:44.000Z | 2021-09-22T05:39:57.000Z | tests/test_versions_in_sync.py | tim-mitchell/pure_interface | 46a2de2574f4543980303cafd89cfcbdb643fbbb | [
"MIT"
] | 3 | 2018-09-19T21:32:01.000Z | 2020-11-17T00:58:55.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import pure_interface
| 29.619048 | 82 | 0.636656 |
a39ce7f687dbc4302e562228dd957da1ccaaa084 | 315 | py | Python | catalog/bindings/wfs/get_capabilities_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/wfs/get_capabilities_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/wfs/get_capabilities_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.wfs.get_capabilities_type_2 import GetCapabilitiesType2
__NAMESPACE__ = "http://www.opengis.net/wfs/2.0"
| 26.25 | 69 | 0.755556 |
a39d78970a2b5428929cac47bbcd677dcd4fd411 | 2,169 | py | Python | timeStamps/admin.py | zandegran/django-timeStamp | 2c598d5543dc9b9198f41f0712406f22e60d5fa6 | [
"MIT"
] | 1 | 2017-12-15T17:36:58.000Z | 2017-12-15T17:36:58.000Z | timeStamps/admin.py | zandegran/django-timeStamp | 2c598d5543dc9b9198f41f0712406f22e60d5fa6 | [
"MIT"
] | null | null | null | timeStamps/admin.py | zandegran/django-timeStamp | 2c598d5543dc9b9198f41f0712406f22e60d5fa6 | [
"MIT"
] | null | null | null | """
This module is to define how TimeStamp model is represented in the Admin site
It also registers the model to be shown in the admin site
.. seealso:: :class:`..models.TimeStamp`
"""
from django.contrib import admin
from .models import TimeStamp
admin.site.register(TimeStamp,TimeStampAdmin) # Expose TimeStamp in the admin site using the TimeStampAdmin display options
a39e36cdbd6fb2489b1dabdf74c900884f32c597 | 718 | py | Python | setup.py | Gearheart-team/django-accounts | e0c2f12d350846fa31143b6dbdb0cf6fa713fb11 | [
"MIT"
] | null | null | null | setup.py | Gearheart-team/django-accounts | e0c2f12d350846fa31143b6dbdb0cf6fa713fb11 | [
"MIT"
] | null | null | null | setup.py | Gearheart-team/django-accounts | e0c2f12d350846fa31143b6dbdb0cf6fa713fb11 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Package metadata and dependencies for izeni-django-accounts.
setup(
    name='izeni-django-accounts',
    version='1.1.2a',
    # Namespace split so the code installs under izeni.django.*
    namespace_packages=['izeni', 'izeni.django'],
    packages=find_packages(),
    include_package_data=True,
    author='Izeni, Inc.',
    author_email='django-accounts@izeni.com',
    # NOTE(review): the whole README is passed as the short `description`
    # field; presumably `long_description` was intended - confirm.
    description=open('README.md').read(),
    url='https://dev.izeni.net/izeni/django-accounts',
    install_requires=[
        'Django==1.11.7',
        'djangorestframework>3.4',
        #'python-social-auth==0.2.13',
        'social-auth-app-django',
        'requests==2.8.1',
    ],
    # Forked python-social-auth carrying a Google auth fix.
    dependency_links=[
        'https://github.com/izeni-team/python-social-auth.git@v0.2.21-google-fix#egg=python-social-auth-0',
    ]
)
| 29.916667 | 107 | 0.637883 |
a39ece0f6a490b1cd3625b5fef325786496075c3 | 2,973 | py | Python | train.py | Saaaber/urban-segmentation | fc893feb9208d3206d7c5329b1ccf4cfab97ed31 | [
"MIT"
] | 3 | 2020-11-16T20:21:25.000Z | 2021-06-11T13:09:30.000Z | train.py | Saaaber/urban-segmentation | fc893feb9208d3206d7c5329b1ccf4cfab97ed31 | [
"MIT"
] | null | null | null | train.py | Saaaber/urban-segmentation | fc893feb9208d3206d7c5329b1ccf4cfab97ed31 | [
"MIT"
] | 3 | 2020-11-11T23:43:15.000Z | 2022-03-17T09:03:42.000Z | # Copyright (c) Ville de Montreal. All rights reserved.
# Licensed under the MIT license.
# See LICENSE file in the project root for full license information.
import os
import json
import torch
import argparse
import datetime
from utils.factories import ModelFactory, OptimizerFactory, TrainerFactory
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Semantic Segmentation Training")
    parser.add_argument('-c', '--config', default=None, type=str,
                        help="config file path (default: None)")
    parser.add_argument('-r', '--resume', default=None, type=str,
                        help="path to latest checkpoint (default: None)")
    parser.add_argument('-d', '--dir', default=None, type=str,
                        help="experiment dir path (default: None)")
    args = parser.parse_args()

    # Check for GPU
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    torch.backends.cudnn.deterministic = True

    # Check if Colab run
    COLAB = os.path.exists("/content/gdrive")

    if args.config:
        # Load config file (with-block closes the handle; the old
        # json.load(open(...)) leaked it)
        with open(args.config) as config_file:
            config = json.load(config_file)
    elif args.resume:
        # Load config file from checkpoint
        config = torch.load(args.resume, map_location=device)['config']
    else:
        # BUG FIX: previously fell through with `config` unbound, crashing
        # later with a NameError; fail fast with a usage message instead.
        parser.error("either --config or --resume must be provided")

    # Change log dir if colab run
    if COLAB is True:
        config['trainer']['log_dir'] = "/content/gdrive/My Drive/colab_saves/logs/"

    # Set experiment dir to current time if none provided
    if args.dir:
        experiment_dir = args.dir
    else:
        experiment_dir = datetime.datetime.now().strftime("%m%d_%H%M%S")

    # Init model and optimizer from config with factories
    model = ModelFactory.get(config['model'])
    params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = OptimizerFactory.get(config['optimizer'], params)

    # Check if semi-supervised run
    if config['semi'] is True:
        # Init model_d and optimizer_d (discriminator) from config with factories
        model_d = ModelFactory.get(config['model_d'])
        params_d = filter(lambda p: p.requires_grad, model_d.parameters())
        optimizer_d = OptimizerFactory.get(config['optimizer_d'], params_d)

        # Init semi-supervised trainer object from config with factory
        trainer = TrainerFactory.get(config)(
            model,
            model_d,
            optimizer,
            optimizer_d,
            config=config,
            resume=args.resume,
            experiment_dir=experiment_dir,
            **config['trainer']['options'])
    else:
        # Init supervised trainer object from config with factory
        trainer = TrainerFactory.get(config)(
            model,
            optimizer,
            config=config,
            resume=args.resume,
            experiment_dir=experiment_dir,
            **config['trainer']['options'])

    # Run a training experiment
    trainer.train()
| 33.784091 | 83 | 0.636731 |
a3a01913f52507b8c2e9c60bffcef520ae43b4db | 1,036 | py | Python | pypeit/core/wavecal/spectrographs/templ_soar_goodman.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | pypeit/core/wavecal/spectrographs/templ_soar_goodman.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | pypeit/core/wavecal/spectrographs/templ_soar_goodman.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | """ Generate the wavelength templates for SOAR Goodman"""
import os
from pypeit.core.wavecal import templates
from IPython import embed
if __name__ == '__main__':
    # Regenerate the SOAR Goodman 400 wavelength template in place.
    soar_goodman_400(overwrite=True)
a3a1c89d1bcdd899b6c1712a17770e89aa6ef0b0 | 5,062 | py | Python | vivarium/lidar.py | Pyrofoux/vivarium | 90c07384929f6c34915f053fd8e95e91358c4e58 | [
"MIT"
] | 2 | 2020-10-30T15:28:06.000Z | 2022-01-31T17:13:25.000Z | vivarium/lidar.py | Pyrofoux/vivarium | 90c07384929f6c34915f053fd8e95e91358c4e58 | [
"MIT"
] | null | null | null | vivarium/lidar.py | Pyrofoux/vivarium | 90c07384929f6c34915f053fd8e95e91358c4e58 | [
"MIT"
] | null | null | null | from simple_playgrounds.entities.agents.sensors.sensor import *
from simple_playgrounds.entities.agents.sensors.semantic_sensors import *
from collections import defaultdict
from pymunk.vec2d import Vec2d
import math
#@SensorGenerator.register('lidar')
| 34.435374 | 111 | 0.600356 |
a3a2b31e0b527f3675dc65a92359c7b90836c880 | 511 | py | Python | apilos_settings/models.py | MTES-MCT/apilos | 6404b94b0f668e39c1dc12a6421aebd26ef1c98b | [
"MIT"
] | null | null | null | apilos_settings/models.py | MTES-MCT/apilos | 6404b94b0f668e39c1dc12a6421aebd26ef1c98b | [
"MIT"
] | 2 | 2021-12-15T05:10:43.000Z | 2021-12-15T05:11:00.000Z | apilos_settings/models.py | MTES-MCT/apilos | 6404b94b0f668e39c1dc12a6421aebd26ef1c98b | [
"MIT"
] | 1 | 2021-12-28T13:06:06.000Z | 2021-12-28T13:06:06.000Z | from django.db import models
| 26.894737 | 60 | 0.700587 |
a3a3cd19889c828efa32a912a6cda2aa73fb4ca6 | 4,310 | py | Python | bin/allplots.py | Gabaldonlab/karyon | ba81828921b83b553f126892795253be1fd941ba | [
"MIT"
] | null | null | null | bin/allplots.py | Gabaldonlab/karyon | ba81828921b83b553f126892795253be1fd941ba | [
"MIT"
] | 2 | 2021-07-07T08:40:56.000Z | 2022-01-06T16:10:27.000Z | bin/allplots.py | Gabaldonlab/karyon | ba81828921b83b553f126892795253be1fd941ba | [
"MIT"
] | null | null | null | #!/bin/python
import sys, os, re, subprocess, math
import argparse
import psutil
from pysam import pysam
from Bio import SeqIO
import numpy as np
import numpy.random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import seaborn as sns
import pandas as pd
import scipy.stats
from scipy.stats import gaussian_kde
from scipy import stats
from decimal import Decimal
import string, random
if __name__ == '__main__':
    # Command-line interface for generating all ploidy/coverage plots.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fasta', required=True, help="fasta file used as input")
    parser.add_argument('-d', '--output_directory', default="./", help='Directory where all the output files will be generated.')
    parser.add_argument('-o', '--output_name', required=True, help="Output prefix")
    parser.add_argument('-v', '--vcf', required=True, help="VCF file used as input")
    parser.add_argument('-p', '--pileup', required=True, help="Mpileup file used as input")
    parser.add_argument('-b', '--bam', required=True, help="Bam file used as input")
    parser.add_argument('-l', '--library', required=True, nargs='+', help="Illumina libraries used for the KAT plot")
    parser.add_argument('--configuration', default=False, help="Configuration file. By default will use ./configuration.txt as the configuration file.")
    parser.add_argument('-w', '--window_size', default=1000, help="Window size for plotting")
    parser.add_argument('-x', '--max_scaf2plot', default=20, help="Number of scaffolds to analyze")
    parser.add_argument('-s', '--scafminsize', default=False, help="Will ignore scaffolds with length below the given threshold")
    parser.add_argument('-S', '--scafmaxsize', default=False, help="Will ignore scaffolds with length above the given threshold")
    parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the different programs. If false, the program will assign a name consisting of a string of 6 random alphanumeric characters.')
    args = parser.parse_args()

    # Normalize the output directory to an absolute path with trailing slash.
    true_output = os.path.abspath(args.output_directory)
    if true_output[-1] != "/":
        true_output=true_output+"/"
    # Default configuration file lives one directory above this script.
    config_path = args.configuration
    if not args.configuration:
        selfpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        config_path = selfpath[:selfpath.rfind('/')]
        config_path = selfpath[:selfpath.rfind('/')]+"/configuration.txt"
    config_dict = parse_config(config_path)

    counter = int(args.max_scaf2plot)
    window_size=int(args.window_size)
    step=window_size/2
    # NOTE(review): true_output is recomputed here WITHOUT the trailing "/"
    # added above - verify downstream path concatenation still behaves.
    true_output = os.path.abspath(args.output_directory)
    cwd = os.path.abspath(os.getcwd())
    os.chdir(true_output)

    # Compress and index the VCF so it can be read with tabix/pysam.
    os.system("bgzip -c "+ args.vcf + " > " + args.vcf + ".gz")
    os.system("tabix -p vcf "+ args.vcf+".gz")
    #vcf_file = pysam.VariantFile(args.vcf+".gz", 'r')
    bam_file = pysam.AlignmentFile(args.bam, 'rb')
    home = config_dict["karyon"][0]
    # Scratch directory name: user-supplied job id or a random string.
    job_ID = args.job_id if args.job_id else id_generator()
    name = args.output_name if args.output_name else job_ID
    kitchen = home + "tmp/"+job_ID
    # Map every scaffold name to its sequence length.
    lendict = {}
    fastainput = SeqIO.index(args.fasta, "fasta")
    for i in fastainput:
        lendict[i] = len(fastainput[i].seq)
    from karyonplots import katplot, allplots
    from report import report, ploidy_veredict
    # Generate all plots, classify ploidy per window, and emit the report.
    df = allplots(window_size,
            args.vcf,
            args.fasta,
            args.bam,
            args.pileup,
            args.library[0],
            config_dict['nQuire'][0],
            config_dict["KAT"][0],
            kitchen,
            true_output,
            counter,
            job_ID, name,
            args.scafminsize,
            args.scafmaxsize, False)
    df2 = ploidy_veredict(df, true_output, name, window_size)
    report(true_output, name, df2, True, False, window_size, False, False)
    df2.to_csv(true_output+"/Report/"+name+".csv", index=False)
    # Restore the working directory we started from.
    os.chdir(cwd)
| 35.916667 | 236 | 0.710905 |
a3a50e8b6b7936872866a8a4572b115958922c08 | 713 | py | Python | console/middleware.py | laincloud/Console | 9d4fb68ad5378279697803ca45a4eda58d72d9a3 | [
"MIT"
] | 11 | 2016-05-04T11:55:01.000Z | 2018-09-29T01:00:05.000Z | console/middleware.py | laincloud/Console | 9d4fb68ad5378279697803ca45a4eda58d72d9a3 | [
"MIT"
] | 21 | 2016-05-25T06:54:44.000Z | 2019-06-06T00:38:38.000Z | console/middleware.py | laincloud/Console | 9d4fb68ad5378279697803ca45a4eda58d72d9a3 | [
"MIT"
] | 16 | 2016-05-13T08:20:43.000Z | 2021-12-31T09:23:14.000Z | # -*- coding: utf-8
from django.http import JsonResponse, HttpResponse
# from commons.settings import ARCHON_HOST
| 32.409091 | 100 | 0.605891 |
a3a5bb350e05522589702afb78e2a9430fe6a8c4 | 1,061 | py | Python | test.py | vinsmokemau/NQueens | 7c9291f655b8e4f0ce4c6c5d07a80440f8f2c0a8 | [
"MIT"
] | null | null | null | test.py | vinsmokemau/NQueens | 7c9291f655b8e4f0ce4c6c5d07a80440f8f2c0a8 | [
"MIT"
] | null | null | null | test.py | vinsmokemau/NQueens | 7c9291f655b8e4f0ce4c6c5d07a80440f8f2c0a8 | [
"MIT"
] | null | null | null | import unittest
from algorithm import NQueens
| 25.878049 | 65 | 0.673893 |
6e55e971b17323a0b8342354a7a6ad601469f01e | 18,524 | py | Python | syntropynac/resolve.py | SyntropyNet/syntropy-nac | 8beddcd606d46fd909f51d0c53044be496cec995 | [
"MIT"
] | 3 | 2021-01-06T08:24:47.000Z | 2021-02-27T08:08:07.000Z | syntropynac/resolve.py | SyntropyNet/syntropy-nac | 8beddcd606d46fd909f51d0c53044be496cec995 | [
"MIT"
] | null | null | null | syntropynac/resolve.py | SyntropyNet/syntropy-nac | 8beddcd606d46fd909f51d0c53044be496cec995 | [
"MIT"
] | null | null | null | import functools
from dataclasses import dataclass
from itertools import combinations
import click
import syntropy_sdk as sdk
from syntropy_sdk import utils
from syntropynac.exceptions import ConfigureNetworkError
from syntropynac.fields import ALLOWED_PEER_TYPES, ConfigFields, PeerState, PeerType
def resolve_agents(api, agents, silent=False):
    """Fill in missing endpoint ids in ``agents`` by name lookup, in place.

    Entries whose value is already set are left untouched; entries mapping a
    name to ``None`` are resolved via the platform API.

    Args:
        api (PlatformApi): API object to communicate with the platform.
        agents (dict): Mapping of endpoint name to id (or None).
        silent (bool, optional): Indicates whether to suppress messages and
            raise instead - used with Ansible. Defaults to False.
    """
    pending = [name for name, agent_id in agents.items() if agent_id is None]
    for name in pending:
        matches = resolve_agent_by_name(api, name, silent=silent)
        if len(matches) == 1:
            agents[name] = matches[0]
            continue
        error = f"Could not resolve endpoint name {name}, found: {matches}."
        if silent:
            raise ConfigureNetworkError(error)
        click.secho(
            error,
            err=True,
            fg="red",
        )
def resolve_present_absent(agents, present, absent):
    """Resolves agent connections by objects into agent connections by ids.

    Present connections appear as "present" in the config and will be added
    to the network; absent connections appear as "absent" and will be removed
    from the existing network. A present connection is dropped when the same
    pair (in either direction) is also absent, or when it would connect an
    endpoint to itself.

    Args:
        agents (dict[str, int]): Agent map from name to id.
        present (list): A list of connections that are marked as present in the config.
        absent (list): A list of connections that are marked as absent in the config.

    Returns:
        tuple: Three items that correspond to present/absent connections and a
            list of ConnectionServices objects that correspond one-to-one with
            the present connections. Present/absent connections are lists of
            two-element lists of agent ids.
    """
    present_ids = [[agents[src[0]], agents[dst[0]]] for src, dst in present]
    absent_ids = [[agents[src[0]], agents[dst[0]]] for src, dst in absent]
    # Filter once instead of duplicating the same condition for the id list
    # and the services list (the old code repeated it, which was both slower
    # and easy to let drift out of sync).
    kept = [
        (link, conn)
        for link, conn in zip(present_ids, present)
        if link not in absent_ids
        and link[::-1] not in absent_ids
        and link[0] != link[1]
    ]
    return (
        [link for link, _ in kept],
        [link for link in absent_ids if link[0] != link[1]],
        [ConnectionServices.create(link, conn) for link, conn in kept],
    )
def validate_connections(connections, silent=False, level=0):
    """Check if the connections structure makes any sense.

    Recursively validates nested 'connect_to' dictionaries up to 1 level
    deep; anything deeper is warned about and ignored.

    Args:
        connections (dict): A dictionary describing connections.
        silent (bool, optional): Indicates whether to suppress output to stderr.
            Raises ConfigureNetworkError instead. Defaults to False.
        level (int, optional): Recursion level depth. Defaults to 0.

    Raises:
        ConfigureNetworkError: If silent==True, then raise an exception in case of irrecoverable error.

    Returns:
        bool: Returns False in case of invalid connections structure.
    """

    def _fail(message):
        # Single error-reporting path (previously copy/pasted at every check):
        # raise in silent mode, otherwise print to stderr and signal failure.
        if silent:
            raise ConfigureNetworkError(message)
        click.secho(message, err=True, fg="red")
        return False

    if level > 1:
        if not silent:
            click.secho(
                (
                    f"Field {ConfigFields.CONNECT_TO} found at level {level + 1}. This will be ignored, "
                    "however, please double check your configuration file."
                )
            )
        return True

    for name, con in connections.items():
        # Basic shape checks: non-empty name, dict payload, allowed peer type.
        if not name or not isinstance(name, (str, int)):
            return _fail("Invalid endpoint name found.")
        if not isinstance(con, dict):
            return _fail(
                f"Entry '{name}' in {ConfigFields.CONNECT_TO} must be a dictionary, but found {con.__class__.__name__}."
            )
        if ConfigFields.PEER_TYPE not in con:
            return _fail(f"Endpoint '{name}' {ConfigFields.PEER_TYPE} must be present.")
        if con[ConfigFields.PEER_TYPE] not in ALLOWED_PEER_TYPES:
            return _fail(
                f"Endpoint '{name}' {ConfigFields.PEER_TYPE} '{con[ConfigFields.PEER_TYPE]}' is not allowed."
            )

        # Decide whether the key looks like a numeric id or an endpoint name.
        probably_an_id = False
        try:
            name_as_id = int(name)
            probably_an_id = True
        except ValueError:
            name_as_id = name

        if probably_an_id and con[ConfigFields.PEER_TYPE] == PeerType.ENDPOINT:
            # Recoverable inconsistency: warn but keep validating.
            click.secho(
                (
                    f"Endpoint '{name}' {ConfigFields.PEER_TYPE} is {PeerType.ENDPOINT}, however, "
                    f"it appears to be an {PeerType.ID}."
                ),
                err=True,
                fg="yellow",
            )
        if not probably_an_id and con[ConfigFields.PEER_TYPE] == PeerType.ID:
            return _fail(
                f"Endpoint '{name}' {ConfigFields.PEER_TYPE} is {PeerType.ID}, however, "
                f"it appears to be an {PeerType.ENDPOINT}."
            )

        if ConfigFields.ID in con and con[ConfigFields.ID] is not None:
            # The explicit id must be a non-empty str/int convertible to int,
            # and for id-typed peers it must match the key itself.
            try:
                _ = int(con[ConfigFields.ID])
                id_valid = True
            except ValueError:
                id_valid = False
            if (
                not isinstance(con[ConfigFields.ID], (str, int))
                or not con[ConfigFields.ID]
                or not id_valid
            ):
                return _fail(f"Endpoint '{name}' {ConfigFields.ID} is invalid.")
            if (
                con[ConfigFields.PEER_TYPE] == PeerType.ID
                and int(con[ConfigFields.ID]) != name_as_id
            ):
                return _fail(
                    f"Endpoint '{name}' {ConfigFields.ID} field does not match endpoint id."
                )

        if ConfigFields.SERVICES in con:
            # Services must be a flat list/tuple of str/int names.
            if not isinstance(con[ConfigFields.SERVICES], (list, tuple)):
                return _fail(
                    f"Endpoint '{name}' {ConfigFields.SERVICES} must be a "
                    f"list, but found {con[ConfigFields.SERVICES].__class__.__name__}."
                )
            for service in con[ConfigFields.SERVICES]:
                if not isinstance(service, (str, int)):
                    return _fail(
                        f"Endpoint '{name}' service must be a string"
                        f", but found {service.__class__.__name__}."
                    )

        if ConfigFields.CONNECT_TO in con:
            # Recurse one level into nested connections.
            if not validate_connections(con[ConfigFields.CONNECT_TO], silent, level + 1):
                return False

    return True
def resolve_p2p_connections(api, connections, silent=False):
    """Resolves configuration connections for Point to Point topology.

    Args:
        api (PlatformApi): API object to communicate with the platform.
        connections (dict): A dictionary containing connections as described in the config file.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.

    Returns:
        list: A list of two item lists describing endpoint to endpoint connections.
    """
    present = []  # (src, dst) pairs whose connection should exist
    absent = []  # (src, dst) pairs whose connection should be removed
    agents = {}  # peer name -> result of get_peer_id (may be None until resolved)
    for src in connections.items():
        dst = src[1].get(ConfigFields.CONNECT_TO)
        # Sources that declare no destination contribute nothing in P2P mode.
        if dst is None or len(dst.keys()) == 0:
            continue
        # Point to Point: only the first declared destination is used.
        dst = list(dst.items())[0]
        agents[src[0]] = get_peer_id(*src)
        agents[dst[0]] = get_peer_id(*dst)
        # An explicit ABSENT on either side marks the pair for removal;
        # otherwise a PRESENT (explicit or defaulted) keeps the pair.
        if (
            src[1].get(ConfigFields.STATE) == PeerState.ABSENT
            or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
        ):
            absent.append((src, dst))
        elif (
            src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
            or dst[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
        ):
            present.append((src, dst))
        else:
            error = f"Invalid state for agents {src[0]} or {dst[0]}"
            if not silent:
                click.secho(error, fg="red", err=True)
            else:
                raise ConfigureNetworkError(error)
    # resolve_agents is expected to fill in any ids still missing from `agents`.
    resolve_agents(api, agents, silent=silent)
    # NOTE(review): this iterates agents.keys() (peer *names*), not the resolved
    # ids in agents.values(); names are practically never None, so the guard may
    # never trigger — confirm whether .values() was intended.
    if any(id is None for id in agents.keys()):
        return resolve_present_absent({}, [], [])
    return resolve_present_absent(agents, present, absent)
def expand_agents_tags(api, dst_dict, silent=False):
    """Expand tag endpoints into individual endpoints.

    Args:
        api (PlatformApi): API object to communicate with the platform.
        dst_dict (dict): Connections dictionary that contain tags as endpoints.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.

    Raises:
        ConfigureNetworkError: In case of any errors

    Returns:
        Union[dict, None]: Dictionary with expanded endpoints where key is the name and value is the config(id, state, type).
    """
    items = {}
    # First expand tags
    for name, dst in dst_dict.items():
        if dst.get(ConfigFields.PEER_TYPE) != PeerType.TAG:
            continue
        # Query the platform for every agent carrying this tag name.
        agents = utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
            filter=f"tags_names[]:{name}",
            _preload_content=False,
        )["data"]
        if not agents:
            error = f"Could not find endpoints by the tag {name}"
            if not silent:
                click.secho(error, err=True, fg="red")
                return  # bare return -> None signals failure to the caller
            else:
                raise ConfigureNetworkError(error)
        tag_state = dst.get(ConfigFields.STATE, PeerState.PRESENT)
        for agent in agents:
            agent_name = agent["agent_name"]
            # Keep the first entry seen for an agent, but let an ABSENT tag
            # override a previously recorded PRESENT entry for the same agent.
            if agent_name not in items or (
                tag_state == PeerState.ABSENT
                and items[agent_name][ConfigFields.STATE] == PeerState.PRESENT
            ):
                items[agent_name] = {
                    ConfigFields.ID: agent["agent_id"],
                    ConfigFields.STATE: tag_state,
                    ConfigFields.PEER_TYPE: PeerType.ENDPOINT,
                    ConfigFields.SERVICES: dst.get(ConfigFields.SERVICES),
                }
    # Then override with explicit configs
    for name, dst in dst_dict.items():
        if dst.get(ConfigFields.PEER_TYPE) != PeerType.TAG:
            items[name] = dst
            continue  # redundant (end of loop body) but harmless
    return items
def resolve_p2m_connections(api, connections, silent=False):
    """Resolves configuration connections for Point to Multipoint topology. Also, expands tags.

    Args:
        api (PlatformApi): API object to communicate with the platform.
        connections (dict): A dictionary containing connections as described in the config file.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.

    Returns:
        list: A list of two item lists describing endpoint to endpoint connections.
    """
    present = []  # (src, dst) pairs whose connection should exist
    absent = []  # (src, dst) pairs whose connection should be removed
    agents = {}  # peer name -> result of get_peer_id (may be None until resolved)
    for src in connections.items():
        dst_dict = src[1].get(ConfigFields.CONNECT_TO)
        if dst_dict is None or len(dst_dict.keys()) == 0:
            continue
        # NOTE(review): `silent` is not forwarded here, so in silent mode a
        # missing tag prints to stderr instead of raising — confirm intent.
        dst_dict = expand_agents_tags(api, dst_dict)
        # None means tag expansion failed; abort with an empty resolution.
        if dst_dict is None:
            return resolve_present_absent({}, [], [])
        agents[src[0]] = get_peer_id(*src)
        # One source fans out to every expanded destination.
        for dst in dst_dict.items():
            agents[dst[0]] = get_peer_id(*dst)
            if (
                src[1].get(ConfigFields.STATE) == PeerState.ABSENT
                or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
            ):
                absent.append((src, dst))
            elif (
                src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
                or dst[1].get(ConfigFields.STATE, PeerState.PRESENT)
                == PeerState.PRESENT
            ):
                present.append((src, dst))
            else:
                error = f"Invalid state for agents {src[0]} or {dst[0]}"
                if not silent:
                    click.secho(error, fg="red", err=True)
                else:
                    raise ConfigureNetworkError(error)
    resolve_agents(api, agents, silent=silent)
    # NOTE(review): iterates names (keys), not resolved ids (values) — see
    # the matching check in resolve_p2p_connections; confirm intent.
    if any(id is None for id in agents.keys()):
        return resolve_present_absent({}, [], [])
    return resolve_present_absent(agents, present, absent)
def resolve_mesh_connections(api, connections, silent=False):
    """Resolves configuration connections for mesh topology. Also, expands tags.

    Args:
        api (PlatformApi): API object to communicate with the platform.
        connections (dict): A dictionary containing connections.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.

    Returns:
        list: A list of two item lists describing endpoint to endpoint connections.
    """
    present = []  # (src, dst) pairs whose connection should exist
    absent = []  # (src, dst) pairs whose connection should be removed
    # NOTE(review): `silent` is not forwarded here either — in silent mode a
    # missing tag prints instead of raising; confirm intent.
    connections = expand_agents_tags(api, connections)
    if connections is None:
        return resolve_present_absent({}, [], [])
    agents = {
        name: get_peer_id(name, connection) for name, connection in connections.items()
    }
    # NOTE: Assuming connections are bidirectional
    for src, dst in combinations(connections.items(), 2):
        if (
            src[1].get(ConfigFields.STATE) == PeerState.ABSENT
            or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
        ):
            absent.append((src, dst))
        elif (
            src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
            or dst[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
        ):
            present.append((src, dst))
        else:
            error = f"Invalid state for agents {src[0]} or {dst[0]}"
            if not silent:
                click.secho(error, fg="red", err=True)
            else:
                raise ConfigureNetworkError(error)
    resolve_agents(api, agents, silent=silent)
    # NOTE(review): iterates names (keys), not resolved ids (values) — see
    # the matching check in resolve_p2p_connections; confirm intent.
    if any(id is None for id in agents.keys()):
        return resolve_present_absent({}, [], [])
    return resolve_present_absent(agents, present, absent)
| 36.608696 | 125 | 0.589721 |
6e56c45295d74ab6452768ca7c9600d73e511225 | 10,298 | py | Python | idact/detail/nodes/node_impl.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | idact/detail/nodes/node_impl.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | idact/detail/nodes/node_impl.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | """This module contains the implementation of the cluster node interface."""
import datetime
from typing import Optional, Any, Callable
import bitmath
import fabric.operations
import fabric.tasks
import fabric.decorators
from fabric.exceptions import CommandTimeout
from fabric.state import env
from idact.core.retry import Retry
from idact.core.config import ClusterConfig
from idact.core.jupyter_deployment import JupyterDeployment
from idact.core.node_resource_status import NodeResourceStatus
from idact.detail.auth.authenticate import authenticate
from idact.detail.helper.raise_on_remote_fail import raise_on_remote_fail
from idact.detail.helper.retry import retry_with_config
from idact.detail.helper.stage_info import stage_debug
from idact.detail.helper.utc_from_str import utc_from_str
from idact.detail.helper.utc_now import utc_now
from idact.detail.jupyter.deploy_jupyter import deploy_jupyter
from idact.detail.log.capture_fabric_output_to_log import \
capture_fabric_output_to_log
from idact.detail.log.get_logger import get_logger
from idact.detail.nodes.node_internal import NodeInternal
from idact.detail.nodes.node_resource_status_impl import NodeResourceStatusImpl
from idact.detail.serialization.serializable_types import SerializableTypes
from idact.detail.tunnel.build_tunnel import build_tunnel
from idact.detail.tunnel.get_bindings_with_single_gateway import \
get_bindings_with_single_gateway
from idact.detail.tunnel.ssh_tunnel import SshTunnel
from idact.detail.tunnel.tunnel_internal import TunnelInternal
from idact.detail.tunnel.validate_tunnel_ports import validate_tunnel_ports
ANY_TUNNEL_PORT = 0
def __eq__(self, other):
    # Structural equality: two instances are equal when all their instance
    # attributes match. NOTE(review): assumes `other` also has a __dict__
    # (raises AttributeError otherwise) — there is no NotImplemented
    # fallback for comparisons against foreign types; confirm intent.
    return self.__dict__ == other.__dict__
| 37.721612 | 79 | 0.583026 |
6e5770f83af2ce49e0548c12ebb2126470694c34 | 2,012 | py | Python | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | arnaud-morvan/geoportailv3 | b9d676cf78e45e12894f7d1ceea99b915562d64f | [
"MIT"
] | 17 | 2015-01-14T08:40:22.000Z | 2021-05-08T04:39:50.000Z | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | arnaud-morvan/geoportailv3 | b9d676cf78e45e12894f7d1ceea99b915562d64f | [
"MIT"
] | 1,477 | 2015-01-05T09:58:41.000Z | 2022-03-18T11:07:09.000Z | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | arnaud-morvan/geoportailv3 | b9d676cf78e45e12894f7d1ceea99b915562d64f | [
"MIT"
] | 14 | 2015-07-24T07:33:13.000Z | 2021-03-02T13:51:48.000Z | """create table for hierarchy of accounts
Revision ID: 17fb1559a5cd
Revises: 3b7de32aebed
Create Date: 2015-09-16 14:20:30.972593
"""
# revision identifiers, used by Alembic.
revision = '17fb1559a5cd'
down_revision = '3b7de32aebed'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
| 27.944444 | 73 | 0.611332 |
6e596f23ab56bd2dd8dd6ce01540892f3e46cdad | 1,076 | py | Python | tests/test_migrate.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | 1 | 2017-08-25T07:17:04.000Z | 2017-08-25T07:17:04.000Z | tests/test_migrate.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | 1 | 2018-06-09T18:03:35.000Z | 2018-06-09T18:03:35.000Z | tests/test_migrate.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import os
from tests import FlexGetBase
| 31.647059 | 132 | 0.636617 |
6e5a5481a3630f1bb09ba60f327038cb691a80cf | 2,422 | py | Python | src/challenges/CtCI/dynamic/P1_triple_step.py | Ursidours/pythonic_interviews | a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045 | [
"MIT"
] | 2 | 2021-11-13T01:30:25.000Z | 2022-02-11T18:17:22.000Z | src/challenges/CtCI/dynamic/P1_triple_step.py | arnaudblois/pythonic_interviews | a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045 | [
"MIT"
] | null | null | null | src/challenges/CtCI/dynamic/P1_triple_step.py | arnaudblois/pythonic_interviews | a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045 | [
"MIT"
] | null | null | null | """
Problem 1 of Chapter 8 in CtCi
Triple Step: A child is running up a staircase with N steps and can hop either
1 step, 2 steps, or 3 steps at a time. Return the number of possible ways exist
this can be done.
General idea of the solution: At any step N, the child must necessarily come
from the steps N-3, N-2 or N-1. The possible ways to go to N are therefore the
sums of the possible ways to come to N-3, N-2 and N-1. This is the definition
of the tribonacci numbers, a generalization of the Fibonacci sequence.
"""
from src.utils.decorators import Memoize
def tribonacci_number(N):
    """
    Closed-form (Binet-style) expression for the Nth Tribonacci number.
    Purely a curiosity -- nobody would expect this in an interview :)
    """
    root33 = 33**0.5
    alpha = (19 + 3 * root33)**(1 / 3)
    beta = (19 - 3 * root33)**(1 / 3)
    scale = (586 + 102 * root33)**(1 / 3)
    # (alpha + beta + 1) / 3 is the "tribonacci constant", the dominant
    # root of x^3 = x^2 + x + 1; powers of it dominate the sequence.
    dominant = 1 / 3 * (alpha + beta + 1)
    top = 3 * scale * dominant**(N + 1)
    bottom = scale**2 - 2 * scale + 4
    # Rounding absorbs the vanishing contribution of the complex roots.
    return round(top / bottom)
def triple_step_iterative(nb_of_steps):
    """
    Rolling-window iteration: only the counts for the three most recent
    steps are kept, and the window slides forward once per stair until
    step nb_of_steps is reached.
    """
    three_back, two_back, one_back = 0, 0, 1
    for _ in range(nb_of_steps):
        three_back, two_back, one_back = (
            two_back,
            one_back,
            three_back + two_back + one_back,
        )
    return one_back
def triple_step_bottom_up(nb_of_steps):
    """
    Classic tabulation: seed the table with the answers for 0, 1 and 2
    steps, then grow it one entry at a time up to nb_of_steps, each new
    entry being the sum of the previous three.
    """
    ways = [1, 1, 2]
    for _ in range(3, nb_of_steps + 1):
        ways.append(ways[-1] + ways[-2] + ways[-3])
    return ways[nb_of_steps]
| 31.454545 | 79 | 0.641618 |
6e5a95d6b33481e439c3c6dd74b69db486074c51 | 117 | py | Python | lib/JumpScale/lib/docker/__init__.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | 1 | 2015-10-26T10:38:13.000Z | 2015-10-26T10:38:13.000Z | lib/JumpScale/lib/docker/__init__.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | null | null | null | lib/JumpScale/lib/docker/__init__.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | null | null | null | from JumpScale import j
j.base.loader.makeAvailable(j, 'tools')
from Docker import Docker
j.tools.docker = Docker()
| 19.5 | 39 | 0.769231 |
6e5ab1e623f341546ab1d75882702a30b02894e2 | 3,704 | py | Python | scenes/capture/motor_pi/stepper/motorclient.py | tum-pbs/reconstructScalarFlows | 948efeaa99b90c3879f9fb544da9a596b0cb5852 | [
"Apache-2.0"
] | null | null | null | scenes/capture/motor_pi/stepper/motorclient.py | tum-pbs/reconstructScalarFlows | 948efeaa99b90c3879f9fb544da9a596b0cb5852 | [
"Apache-2.0"
] | 1 | 2020-02-20T12:37:38.000Z | 2020-02-20T17:04:53.000Z | scenes/capture/motor_pi/stepper/motorclient.py | tum-pbs/reconstructScalarFlows | 948efeaa99b90c3879f9fb544da9a596b0cb5852 | [
"Apache-2.0"
] | 3 | 2020-01-23T04:32:46.000Z | 2020-02-20T05:48:36.000Z | #!/usr/bin/env python2
import sys
import socket
import datetime
import math
import time
from time import sleep
# The c binary for controlling the stepper motor is loaded via ctypes
from ctypes import *
stepper_lib = cdll.LoadLibrary('./stepper.so')
# buffer containing the incomplete commands
recvBuffer = str()
# all my socket messages will follow the scheme: "<Control code>|<data>~"
# waits until a full message is received
# Init the native c library
# set the slide to the given relative (0-1) position
if __name__ == "__main__":
main(sys.argv)
| 23.896774 | 78 | 0.560475 |
6e5d48ba91cb1100ebbf354d7f7d6405aa099be0 | 20,335 | py | Python | bots/invaders/agent.py | alv67/Lux-AI-challenge | 4fdd623a8ff578f769a6925ec0200170f84d4737 | [
"MIT"
] | null | null | null | bots/invaders/agent.py | alv67/Lux-AI-challenge | 4fdd623a8ff578f769a6925ec0200170f84d4737 | [
"MIT"
] | 27 | 2021-10-17T22:46:41.000Z | 2021-12-05T23:41:19.000Z | bots/invaders/agent.py | alv67/Lux-AI-challenge | 4fdd623a8ff578f769a6925ec0200170f84d4737 | [
"MIT"
] | 3 | 2021-11-14T19:22:16.000Z | 2021-12-04T06:46:33.000Z | import os
import math
import sys
from typing import List, Tuple
# for kaggle-environments
from abn.game_ext import GameExtended
from abn.jobs import Task, Job, JobBoard
from abn.actions import Actions
from lux.game_map import Position, Cell, RESOURCE_TYPES
from lux.game_objects import City
from lux.game_constants import GAME_CONSTANTS
from lux import annotate
## DEBUG ENABLE
DEBUG_SHOW_TIME = False
DEBUG_SHOW_CITY_JOBS = False
DEBUG_SHOW_CITY_FULLED = False
DEBUG_SHOW_EXPAND_MAP = True
DEBUG_SHOW_EXPAND_LIST = False
DEBUG_SHOW_INPROGRESS = True
DEBUG_SHOW_TODO = True
DEBUG_SHOW_ENERGY_MAP = False
DEBUG_SHOW_ENEMY_CITIES = False
DEBUG_SHOW_INVASION_MAP = False
DEBUG_SHOW_EXPLORE_MAP = False
MAX_CITY_SIZE = 10
DISTANCE_BETWEEN_CITIES = 5
# Define global variables
game_state = GameExtended()
actions = Actions(game_state)
lets_build_city = False
build_pos = None
jobs = game_state.job_board
completed_cities = []
| 47.847059 | 126 | 0.503369 |
6e5de644fd911fb842013165cff69e62361a9159 | 12,503 | py | Python | PySRCG/src/Tabs/cyberware_tab.py | apampuch/PySRCG | bb3777aed3517b473e5860336c015e2e8d0905e9 | [
"MIT"
] | null | null | null | PySRCG/src/Tabs/cyberware_tab.py | apampuch/PySRCG | bb3777aed3517b473e5860336c015e2e8d0905e9 | [
"MIT"
] | null | null | null | PySRCG/src/Tabs/cyberware_tab.py | apampuch/PySRCG | bb3777aed3517b473e5860336c015e2e8d0905e9 | [
"MIT"
] | null | null | null | from copy import copy
from tkinter import *
from tkinter import ttk
from src import app_data
from src.CharData.augment import Cyberware
from src.Tabs.notebook_tab import NotebookTab
from src.statblock_modifier import StatMod
from src.utils import treeview_get, recursive_treeview_fill, calculate_attributes, get_variables
# list of attributes that we need to look for variables in, eg "Cost: rating * 500"
ATTRIBUTES_TO_CALCULATE = ["essence", "cost", "availability_rating", "availability_time", "mods"]
STRINGS_TO_IGNORE = [] # nyi
def add_cyberware_item(self, cyber):
    """Register a piece of cyberware: apply each of its stat mods, store it
    on the character's statblock, and show it in the cyberware listbox.

    :type cyber: Cyberware
    """
    for stat, bonus in cyber.mods.items():
        StatMod.add_mod(stat, bonus)
    self.statblock.cyberware.append(cyber)
    self.cyberware_list.insert(END, cyber.name)
def fill_description_box(self, contents):
    """Replace the text shown in the (read-only) item description box."""
    box = self.desc_box
    # The widget is kept disabled so the user cannot type into it; unlock
    # it just long enough to swap the text, then lock it again.
    box.config(state=NORMAL)
    box.delete(1.0, END)
    box.insert(END, contents)
    box.config(state=DISABLED)
def int_validate(self, action, index, value_if_allowed,
                 prior_value, text, validation_type, trigger_type, widget_name):
    """
    Tk entry validation callback: accept the pending edit only when the
    resulting text is empty (the user is still typing) or parses as an
    integer greater than 0. Rings the bell and rejects the edit otherwise.

    :param value_if_allowed: the entry's text if this edit were accepted
    :return: True if the pending value is acceptable
    """
    if value_if_allowed == "":
        # Allow an empty field so the user can clear and retype.
        return True
    if not value_if_allowed:
        self.bell()
        return False
    try:
        number = int(value_if_allowed)
    except ValueError:
        self.bell()
        return False
    if number > 0:
        return True
    self.bell()
    return False
6e5f43493f76b33f089dfbae79e524b7b68ad4b5 | 337 | py | Python | myapp/mymetric/my-metric.py | affoliveira/hiring-engineers | 4064d8c7b6cead9a88197e95fcd6a0f2395e4d44 | [
"Apache-2.0"
] | null | null | null | myapp/mymetric/my-metric.py | affoliveira/hiring-engineers | 4064d8c7b6cead9a88197e95fcd6a0f2395e4d44 | [
"Apache-2.0"
] | null | null | null | myapp/mymetric/my-metric.py | affoliveira/hiring-engineers | 4064d8c7b6cead9a88197e95fcd6a0f2395e4d44 | [
"Apache-2.0"
] | null | null | null | from datadog import initialize, statsd
import time
import random
import os
options = {
'statsd_host':os.environ['DD_AGENT_HOST'],
'statsd_port':8125
}
initialize(**options)
i = 0
while(1):
i += 1
r = random.randint(0, 1000)
statsd.gauge('mymetric',r , tags=["environment:dev"])
time.sleep(int(os.environ['interval'])) | 17.736842 | 55 | 0.68546 |
6e5f8bfb8859c97984af510e67f81278396d3ad6 | 277 | py | Python | 1 ano/logica-de-programacao/list-telefone-lucio.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | 1 | 2021-09-24T16:26:04.000Z | 2021-09-24T16:26:04.000Z | 1 ano/logica-de-programacao/list-telefone-lucio.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | null | null | null | 1 ano/logica-de-programacao/list-telefone-lucio.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | null | null | null | n = [0,0,0,0,0,0,0,0,0,0]
t = [0,0,0,0,0,0,0,0,0,0]
c=0
while(c<10):
n[c]=input("Digite o nome")
t[c]=input("Digite o telefone")
c+=1
# Lookup loop: keep asking for a name until the user types "fim".
# Fixes from review: the input was stored in a misspelled variable
# ("cons"), so `const` never changed and the loop could not terminate;
# the comparison also reused the fill-loop index (already 10, causing
# an IndexError) instead of scanning the whole phone book.
const = ""
while const != "fim":
    const = input("Digite nome a consultar")
    # Scan every stored entry for the requested name.
    for c in range(10):
        if n[c] == const:
            print(f"TEl: {t[c]}")
c+=1 | 21.307692 | 41 | 0.516245 |
6e61986199cea39f158bd8be59e6773d5f58be23 | 8,979 | py | Python | serve_tiny_performance_mdrnn.py | cpmpercussion/robojam | 8f9524be0ad850bdfc0c3459b0e4b677f5f70a84 | [
"MIT"
] | 10 | 2017-11-18T04:01:03.000Z | 2022-03-06T21:07:09.000Z | serve_tiny_performance_mdrnn.py | cpmpercussion/robojam | 8f9524be0ad850bdfc0c3459b0e4b677f5f70a84 | [
"MIT"
] | 17 | 2018-06-12T20:54:40.000Z | 2022-02-09T23:27:24.000Z | serve_tiny_performance_mdrnn.py | cpmpercussion/robojam | 8f9524be0ad850bdfc0c3459b0e4b677f5f70a84 | [
"MIT"
] | 2 | 2017-12-05T23:39:42.000Z | 2018-06-13T13:46:33.000Z | #!/usr/bin/env python3
"""A flask server for Robojam"""
import json
import time
from io import StringIO
import pandas as pd
import tensorflow as tf
import robojam
from tensorflow.compat.v1.keras import backend as K
from flask import Flask, request
from flask_cors import CORS
# Start server.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) # set logging.
app = Flask(__name__)
cors = CORS(app)
compute_graph = tf.compat.v1.Graph()
with compute_graph.as_default():
sess = tf.compat.v1.Session()
# Network hyper-parameters:
N_MIX = 5
N_LAYERS = 2
N_UNITS = 512
TEMP = 1.5
SIG_TEMP = 0.01
# MODEL_FILE = 'models/robojam-td-model-E12-VL-4.57.hdf5'
MODEL_FILE = 'models/robojam-metatone-layers2-units512-mixtures5-scale10-E30-VL-5.65.hdf5'
if __name__ == "__main__":
    """Start a TinyPerformance MDRNN Server"""
    # (the string above is a no-op expression statement used as a comment)
    tf.compat.v1.logging.info("Starting RoboJam Server.")
    # Attach the module-level TF session before loading weights so Keras
    # builds the network into `compute_graph` / `sess` defined above.
    K.set_session(sess)
    with compute_graph.as_default():
        net = robojam.load_robojam_inference_model(model_file=MODEL_FILE, layers=N_LAYERS, units=N_UNITS, mixtures=N_MIX)
    # Serve over HTTPS on all interfaces (Flask default port 5000).
    app.run(host='0.0.0.0', ssl_context=('keys/cert.pem', 'keys/key.pem'))
# Command line tests.
# curl -i -k -X POST -H "Content-Type:application/json" https://127.0.0.1:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.005213, 0.711230, 0.070856, 25.524292, 0\n0.097298, 0.719251, 0.062834, 25.524292, 1\n0.126225, 0.719251, 0.057487, 25.524292, 1\n0.194616, 0.707219, 0.045455, 38.290771, 1\n0.212923, 0.704545, 0.045455, 38.290771, 1\n0.343579, 0.703209, 0.108289, 38.290771, 1\n0.495085, 0.701872, 0.070856, 38.290771, 1\n0.523921, 0.693850, 0.061497, 38.290771, 1\n0.712066, 0.711230, 0.155080, 38.290771, 1\n0.730294, 0.717914, 0.155080, 38.290771, 1\n0.896367, 0.696524, 0.041444, 38.290771, 1\n1.083786, 0.696524, 0.151070, 38.290771, 1\n1.301470, 0.684492, 0.049465, 38.290771, 1\n1.328134, 0.680481, 0.053476, 38.290771, 1\n1.504139, 0.705882, 0.136364, 38.290771, 1\n1.527875, 0.712567, 0.120321, 38.290771, 1\n1.702672, 0.675134, 0.076203, 38.290771, 1\n1.719294, 0.675134, 0.096257, 38.290771, 1\n1.901434, 0.715241, 0.145722, 38.290771, 1\n1.922717, 0.717914, 0.136364, 38.290771, 1\n2.062994, 0.684492, 0.109626, 38.290771, 1\n2.091680, 0.680481, 0.129679, 38.290771, 1\n2.231362, 0.697861, 0.207219, 38.290771, 1\n2.393213, 0.712567, 0.124332, 38.290771, 1\n2.525774, 0.632353, 0.149733, 38.290771, 1\n2.546701, 0.625668, 0.169786, 38.290771, 1\n2.686487, 0.585561, 0.360963, 38.290771, 1\n2.715316, 0.580214, 0.387701, 38.290771, 1\n2.867526, 0.490642, 0.633690, 38.290771, 1\n2.880361, 0.481283, 0.645722, 38.290771, 1\n3.054443, 0.319519, 0.689840, 38.290771, 1\n3.218741, 0.121658, 0.585561, 38.290771, 1\n3.230362, 0.102941, 0.557487, 38.290771, 1\n3.391456, 0.089572, 0.534759, 38.290771, 1"}'
# curl -i -k -X POST -H "Content-Type:application/json" https://138.197.179.234:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.005213, 0.711230, 0.070856, 25.524292, 0\n0.097298, 0.719251, 0.062834, 25.524292, 1\n0.126225, 0.719251, 0.057487, 25.524292, 1\n0.194616, 0.707219, 0.045455, 38.290771, 1\n0.212923, 0.704545, 0.045455, 38.290771, 1\n0.343579, 0.703209, 0.108289, 38.290771, 1\n0.495085, 0.701872, 0.070856, 38.290771, 1\n0.523921, 0.693850, 0.061497, 38.290771, 1\n0.712066, 0.711230, 0.155080, 38.290771, 1\n0.730294, 0.717914, 0.155080, 38.290771, 1\n0.896367, 0.696524, 0.041444, 38.290771, 1\n1.083786, 0.696524, 0.151070, 38.290771, 1\n1.301470, 0.684492, 0.049465, 38.290771, 1\n1.328134, 0.680481, 0.053476, 38.290771, 1\n1.504139, 0.705882, 0.136364, 38.290771, 1\n1.527875, 0.712567, 0.120321, 38.290771, 1\n1.702672, 0.675134, 0.076203, 38.290771, 1\n1.719294, 0.675134, 0.096257, 38.290771, 1\n1.901434, 0.715241, 0.145722, 38.290771, 1\n1.922717, 0.717914, 0.136364, 38.290771, 1\n2.062994, 0.684492, 0.109626, 38.290771, 1\n2.091680, 0.680481, 0.129679, 38.290771, 1\n2.231362, 0.697861, 0.207219, 38.290771, 1\n2.393213, 0.712567, 0.124332, 38.290771, 1\n2.525774, 0.632353, 0.149733, 38.290771, 1\n2.546701, 0.625668, 0.169786, 38.290771, 1\n2.686487, 0.585561, 0.360963, 38.290771, 1\n2.715316, 0.580214, 0.387701, 38.290771, 1\n2.867526, 0.490642, 0.633690, 38.290771, 1\n2.880361, 0.481283, 0.645722, 38.290771, 1\n3.054443, 0.319519, 0.689840, 38.290771, 1\n3.218741, 0.121658, 0.585561, 38.290771, 1\n3.230362, 0.102941, 0.557487, 38.290771, 1\n3.391456, 0.089572, 0.534759, 38.290771, 1"}'
# curl -i -k -X POST -H "Content-Type:application/json" https://138.197.179.234:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.002468, 0.106414, 0.122449, 20.000000, 0\n0.020841, 0.106414, 0.125364, 20.000000, 1\n0.043218, 0.107872, 0.137026, 20.000000, 1\n0.065484, 0.107872, 0.176385, 20.000000, 1\n0.090776, 0.107872, 0.231778, 20.000000, 1\n0.110590, 0.109329, 0.301749, 20.000000, 1\n0.133338, 0.115160, 0.357143, 20.000000, 1\n0.155677, 0.125364, 0.412536, 20.000000, 1\n0.178238, 0.134111, 0.432945, 20.000000, 1\n0.516467, 0.275510, 0.180758, 20.000000, 0\n0.542726, 0.274052, 0.205539, 20.000000, 1\n0.560772, 0.274052, 0.249271, 20.000000, 1\n0.583259, 0.282799, 0.316327, 20.000000, 1\n0.605750, 0.295918, 0.376093, 20.000000, 1\n0.628259, 0.309038, 0.415452, 20.000000, 1\n0.653835, 0.316327, 0.432945, 20.000000, 1\n0.673523, 0.325073, 0.440233, 20.000000, 1\n1.000294, 0.590379, 0.179300, 20.000000, 0\n1.022137, 0.593294, 0.183673, 20.000000, 1\n1.044706, 0.594752, 0.208455, 20.000000, 1\n1.067020, 0.606414, 0.279883, 20.000000, 1\n1.091137, 0.626822, 0.355685, 20.000000, 1\n1.111968, 0.647230, 0.425656, 20.000000, 1\n1.134535, 0.655977, 0.462099, 20.000000, 1\n1.156987, 0.657434, 0.485423, 20.000000, 1\n1.619212, 0.857143, 0.263848, 20.000000, 0\n1.642492, 0.854227, 0.281341, 20.000000, 1\n1.663123, 0.851312, 0.320700, 20.000000, 1\n1.685776, 0.846939, 0.413994, 20.000000, 1\n1.708192, 0.846939, 0.510204, 20.000000, 1\n1.730717, 0.858601, 0.591837, 20.000000, 1\n1.753953, 0.868805, 0.632653, 20.000000, 1\n1.775862, 0.876093, 0.660350, 20.000000, 1\n4.376275, 0.542274, 0.860058, 20.000000, 0\n4.419554, 0.543732, 0.860058, 20.000000, 1"}'
# curl -i -k -X POST -H "Content-Type:application/json" https://0.0.0.0:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.002468, 0.106414, 0.122449, 20.000000, 0\n0.020841, 0.106414, 0.125364, 20.000000, 1\n0.043218, 0.107872, 0.137026, 20.000000, 1\n0.065484, 0.107872, 0.176385, 20.000000, 1\n0.090776, 0.107872, 0.231778, 20.000000, 1\n0.110590, 0.109329, 0.301749, 20.000000, 1\n0.133338, 0.115160, 0.357143, 20.000000, 1\n0.155677, 0.125364, 0.412536, 20.000000, 1\n0.178238, 0.134111, 0.432945, 20.000000, 1\n0.516467, 0.275510, 0.180758, 20.000000, 0\n0.542726, 0.274052, 0.205539, 20.000000, 1\n0.560772, 0.274052, 0.249271, 20.000000, 1\n0.583259, 0.282799, 0.316327, 20.000000, 1\n0.605750, 0.295918, 0.376093, 20.000000, 1\n0.628259, 0.309038, 0.415452, 20.000000, 1\n0.653835, 0.316327, 0.432945, 20.000000, 1\n0.673523, 0.325073, 0.440233, 20.000000, 1\n1.000294, 0.590379, 0.179300, 20.000000, 0\n1.022137, 0.593294, 0.183673, 20.000000, 1\n1.044706, 0.594752, 0.208455, 20.000000, 1\n1.067020, 0.606414, 0.279883, 20.000000, 1\n1.091137, 0.626822, 0.355685, 20.000000, 1\n1.111968, 0.647230, 0.425656, 20.000000, 1\n1.134535, 0.655977, 0.462099, 20.000000, 1\n1.156987, 0.657434, 0.485423, 20.000000, 1\n1.619212, 0.857143, 0.263848, 20.000000, 0\n1.642492, 0.854227, 0.281341, 20.000000, 1\n1.663123, 0.851312, 0.320700, 20.000000, 1\n1.685776, 0.846939, 0.413994, 20.000000, 1\n1.708192, 0.846939, 0.510204, 20.000000, 1\n1.730717, 0.858601, 0.591837, 20.000000, 1\n1.753953, 0.868805, 0.632653, 20.000000, 1\n1.775862, 0.876093, 0.660350, 20.000000, 1\n4.376275, 0.542274, 0.860058, 20.000000, 0\n4.419554, 0.543732, 0.860058, 20.000000, 1"}'
| 121.337838 | 1,670 | 0.702639 |
6e63b1a8022fa7d3c4dd2cc0d17b00043e002831 | 1,024 | py | Python | youtube_sync/tasks.py | abhayagiri/youtube-sync | ce3861f1b0c1448b1d48e5ba17925f5c082f04a2 | [
"MIT"
] | null | null | null | youtube_sync/tasks.py | abhayagiri/youtube-sync | ce3861f1b0c1448b1d48e5ba17925f5c082f04a2 | [
"MIT"
] | null | null | null | youtube_sync/tasks.py | abhayagiri/youtube-sync | ce3861f1b0c1448b1d48e5ba17925f5c082f04a2 | [
"MIT"
] | null | null | null | from datetime import datetime
import os
import re
import subprocess
from . import app, celery, db
from .database import Job
| 26.947368 | 84 | 0.682617 |
6e6b8e97a66c01a64f2cca3a534d23843f440130 | 560 | py | Python | setup.py | garethrylance/python-sdk-example | 3f21c3a6c28f46050688ce1be66e33433a801e7c | [
"CC0-1.0"
] | null | null | null | setup.py | garethrylance/python-sdk-example | 3f21c3a6c28f46050688ce1be66e33433a801e7c | [
"CC0-1.0"
] | null | null | null | setup.py | garethrylance/python-sdk-example | 3f21c3a6c28f46050688ce1be66e33433a801e7c | [
"CC0-1.0"
] | null | null | null | from setuptools import setup
setup(
    name="python-sdk-example",
    version="0.1",
    description="The dispatch model loader - lambda part.",
    url="https://github.com/garethrylance/python-sdk-example",
    author="Gareth Rylance",
    author_email="gareth@rylance.me.uk",
    packages=["example_sdk"],
    install_requires=["pandas"],
    zip_safe=False,
    # Fixed: the previous value {"console_scripts": [""]} declared an empty
    # entry-point spec, which is malformed ("name = module:attr" is required)
    # and breaks installation. Declare no console scripts until one exists.
    entry_points={},
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
    extras_require={"development": ["flake8", "black", "pytest", "snapshottest"]},
)
| 31.111111 | 82 | 0.671429 |
6e6bf3bcb9f6b04ecf66cf6829603687c806b677 | 4,140 | py | Python | markov.py | themichaelusa/zuckerkov | d68780f987b3f032d6382ea75118c84e7f205a39 | [
"MIT"
] | 1 | 2020-03-17T23:34:17.000Z | 2020-03-17T23:34:17.000Z | markov.py | themichaelusa/zuckerkov | d68780f987b3f032d6382ea75118c84e7f205a39 | [
"MIT"
] | null | null | null | markov.py | themichaelusa/zuckerkov | d68780f987b3f032d6382ea75118c84e7f205a39 | [
"MIT"
] | null | null | null | ### IMPORTS
import json
import glob
import string
import random
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
import markovify
### CONSTANTS/GLOBALS/LAMBDAS
SYMBOLS_TO_RM = tuple(list(string.punctuation) + ['\xad'])
NUMBERS_TO_RM = tuple(string.digits)
spacy.prefer_gpu()
NLP_ENGINE = spacy.load("en_core_web_sm")
if __name__ == '__main__':
    # Build a text corpus and a Markov-chain model for each speaker.
    # NOTE(review): gen_user_corpus, build_mm_for_user, gen_valid_sent and
    # get_next_sent_subj are not visible in this excerpt -- presumably
    # defined earlier in this module; confirm against the full source.
    mu = gen_user_corpus('Michael Usachenko', 'mu_corpus.txt')
    mu_model = build_mm_for_user('Michael Usachenko', 'mu_corpus.txt')
    js = gen_user_corpus('Jonathan Shobrook', 'js_corpus.txt')
    js_model = build_mm_for_user('Jonathan Shobrook', 'js_corpus.txt')
    # generate starting sentence
    init_sent = gen_valid_sent(mu_model)
    init_subj = get_next_sent_subj(init_sent)
    # WIP: back and forth conversation. need to modify markovify libs
    # works for a few cycles, then errors
    past_init = False
    prior_resp = None
    """
    for i in range(100):
        if not past_init:
            past_init = True
            js_resp = gen_valid_sent(js_model, init_state=init_subj)
            print('JONATHAN:', js_resp)
            prior_resp = js_resp
        else:
            next_subj = get_next_sent_subj(prior_resp)
            mu_resp = gen_valid_sent(mu_model, init_state=next_subj)
            print('MICHAEL:', mu_resp)
            next_subj = get_next_sent_subj(mu_resp)
            js_resp = gen_valid_sent(js_model, init_state=next_subj)
            print('JONATHAN:', js_resp)
            prior_resp = js_resp
    """
    # Fallback conversation loop: each model generates independently
    # (the subject-chaining variant above is disabled as WIP).
    for i in range(100):
        #next_subj = get_next_sent_subj(prior_resp)
        mu_resp = gen_valid_sent(mu_model)
        print('MICHAEL:', mu_resp)
        #next_subj = get_next_sent_subj(mu_resp)
        js_resp = gen_valid_sent(js_model)
        print('JONATHAN:', js_resp)
        #prior_resp = js_resp
| 23 | 67 | 0.717874 |
6e6ceb4b1bd05af797219ac67e3f71b01f520394 | 6,211 | py | Python | src/cnf_shuffler.py | jreeves3/BiPartGen-Artifact | d7c6db628cad25701a398da67ab87bb725513a61 | [
"MIT"
] | null | null | null | src/cnf_shuffler.py | jreeves3/BiPartGen-Artifact | d7c6db628cad25701a398da67ab87bb725513a61 | [
"MIT"
] | null | null | null | src/cnf_shuffler.py | jreeves3/BiPartGen-Artifact | d7c6db628cad25701a398da67ab87bb725513a61 | [
"MIT"
] | null | null | null | #/**********************************************************************************
# Copyright (c) 2021 Joseph Reeves and Cayden Codel, Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# **************************************************************************************************/
# @file cnf_shuffler.py
#
# @usage python cnf_shuffler.py [-cnsv] <input.cnf>
#
# @author Cayden Codel (ccodel@andrew.cmu.edu)
#
# @bug No known bugs.
import random
import sys
import os
from optparse import OptionParser
# Command-line interface: each flag enables one independent transformation
# of the input CNF; -r fixes the RNG seed for reproducible output.
parser = OptionParser()
parser.add_option("-c", "--clauses", dest="clauses", action="store_true",
                  help="Shuffle the order of the clause lines in the CNF")
parser.add_option("-n", "--names", dest="names", action="store_true",
                  help="Shuffle the names of the literals in the clauses")
parser.add_option("-r", "--random", dest="seed",
                  help="Provide a randomization seed")
parser.add_option("-s", "--signs", dest="signs",
                  help="Switch the sign of literals with the provided prob")
parser.add_option("-v", "--variables", dest="variables",
                  help="Shuffle the order of the variables with prob")
(options, args) = parser.parse_args()
# The CNF file path is taken as the last command-line argument.
f_name = sys.argv[-1]
if len(sys.argv) == 1:
    print("Must supply a CNF file")
    exit()
# Parse the provided CNF file
if not os.path.exists(f_name) or os.path.isdir(f_name):
    print("Supplied CNF file does not exist or is directory", file=sys.stderr)
    exit()
cnf_file = open(f_name, "r")
cnf_lines = cnf_file.readlines()
cnf_file.close()
# Verify that the file has at least one line
if len(cnf_lines) == 0:
    print("Supplied CNF file is empty", file=sys.stderr)
    exit()
# Do treatment on the lines
cnf_lines = list(map(lambda x: x.strip(), cnf_lines))
# Verify that the file is a CNF file
# DIMACS header looks like: "p cnf <num_vars> <num_clauses>".
header_line = cnf_lines[0].split(" ")
if header_line[0] != "p" or header_line[1] != "cnf":
    print("Supplied file doesn't follow DIMACS CNF convention")
    exit()
num_vars = int(header_line[2])
num_clauses = int(header_line[3])
# Echo the header immediately so stdout is itself a valid DIMACS file.
print(" ".join(header_line))
cnf_lines = cnf_lines[1:]
# If the -r option is specified, initialize the random library
if options.seed is not None:
    random.seed(a=int(options.seed))
else:
    random.seed()
# If the -c option is specified, permute all other lines
if options.clauses:
    # random.shuffle shuffles the list in place and returns None; the
    # original code assigned its result back to cnf_lines, which replaced
    # the clause list with None and broke every subsequent use of it.
    random.shuffle(cnf_lines)
# If the -v option is specified, permute the order of variables
# (i.e. shuffle the literals *within* each clause line, each line being
# rewritten with probability var_prob).
if options.variables is not None:
    var_prob = float(options.variables)
    if var_prob <= 0 or var_prob > 1:
        print("Prob for var shuffling not between 0 and 1", file=sys.stderr)
        exit()
    # TODO this doesn't work if each line is a single variable, etc.
    for i in range(0, len(cnf_lines)):
        line = cnf_lines[i]
        atoms = line.split(" ")
        # Skip comment lines ("c ...") and lines that lose the coin flip.
        if atoms[0][0] == "c" or random.random() > var_prob:
            continue
        # Keep the trailing "0" clause terminator in place while shuffling
        # the literals in front of it.
        if atoms[-1] == "0":
            atoms = atoms[:-1]
            random.shuffle(atoms)
            atoms.append("0")
        else:
            random.shuffle(atoms)
        cnf_lines[i] = " ".join(atoms)
# Now do one pass through all other lines to get the variable names
# (-n: rename variables via a random bijection old-name -> new-name,
# preserving each literal's sign).
if options.names:
    literals = {}
    for line in cnf_lines:
        if line[0] == "c":
            continue
        atoms = line.split(" ")
        for atom in atoms:
            # Collect every variable (unsigned literal); 0 is the clause
            # terminator, not a variable.
            lit = abs(int(atom))
            if lit != 0:
                literals[lit] = True
    # After storing all the literals, permute
    # (map each original variable name to a shuffled copy of the same set).
    literal_keys = list(literals.keys())
    p_keys = list(literals.keys())
    random.shuffle(p_keys)
    zipped = list(zip(literal_keys, p_keys))
    for k, p in zipped:
        literals[k] = p
    # Second pass: rewrite every literal through the permutation table.
    for i in range(0, len(cnf_lines)):
        line = cnf_lines[i]
        if line[0] == "c":
            continue
        atoms = line.split(" ")
        for j in range(0, len(atoms)):
            if atoms[j] != "0":
                if int(atoms[j]) < 0:
                    atoms[j] = "-" + str(literals[abs(int(atoms[j]))])
                else:
                    atoms[j] = str(literals[int(atoms[j])])
        cnf_lines[i] = " ".join(atoms)
# -s: flip the sign of literals, each literal string decided once by a
# weighted coin and then flipped consistently everywhere it appears.
if options.signs is not None:
    signs_prob = float(options.signs)
    if signs_prob < 0 or signs_prob > 1:
        print("Sign prob must be between 0 and 1", file=sys.stderr)
        exit()
    # Memo of coin-flip decisions, keyed by the *signed* atom string.
    # NOTE(review): because keys include the sign, "5" and "-5" get
    # independent flips rather than one per-variable flip -- confirm this
    # is the intended semantics.
    flipped_literals = {}
    for i in range(0, len(cnf_lines)):
        line = cnf_lines[i]
        if line[0] == "c":
            continue
        # For each symbol inside, flip weighted coin and see if flip
        atoms = line.split(" ")
        for j in range(0, len(atoms)):
            atom = atoms[j]
            if atom != "0":
                if flipped_literals.get(atom) is None:
                    if random.random() <= signs_prob:
                        flipped_literals[atom] = True
                    else:
                        flipped_literals[atom] = False
                if flipped_literals[atom]:
                    atoms[j] = str(-int(atom))
        cnf_lines[i] = " ".join(atoms)
# Finally, output the transformed lines
for line in cnf_lines:
    print(line)
| 34.893258 | 101 | 0.605378 |
6e6dbb5cefe12073382965816c2a9d3f10ed725c | 4,171 | py | Python | test/app_page_scraper_test.py | googleinterns/betel | 2daa56081ccc753f5b7eafbd1e9a48e3aca4b657 | [
"Apache-2.0"
] | 1 | 2020-09-21T12:52:33.000Z | 2020-09-21T12:52:33.000Z | test/app_page_scraper_test.py | googleinterns/betel | 2daa56081ccc753f5b7eafbd1e9a48e3aca4b657 | [
"Apache-2.0"
] | null | null | null | test/app_page_scraper_test.py | googleinterns/betel | 2daa56081ccc753f5b7eafbd1e9a48e3aca4b657 | [
"Apache-2.0"
] | 1 | 2020-07-31T09:55:33.000Z | 2020-07-31T09:55:33.000Z | import pathlib
import pytest
from betel import app_page_scraper
from betel import betel_errors
from betel import utils
ICON_HTML = """
<img src="%s" class="T75of sHb2Xb">
"""
CATEGORY_HTML = """
<a itemprop="genre">Example</a>
"""
FILTERED_CATEGORY_HTML = """
<a itemprop="genre">Filtered</a>
"""
SIMPLE_HTML = """
<p>Simple paragraph.</p>
"""
ICON_SUBDIR = pathlib.Path("icon_subdir")
APP_ID = "com.example"
ICON_NAME = "icon_com.example"
EXPECTED_CATEGORY = "example"
FILE = "file:"
class TestAppPageScraper:
    """Tests for the Play-store app page scraper."""
    def test_get_icon(self, play_scraper, test_dir, icon_dir):
        """get_app_icon downloads the icon referenced by the page HTML.

        NOTE(review): `play_scraper`, `test_dir` and `icon_dir` are
        presumably pytest fixtures from a conftest, and `_create_icon` /
        `_create_html_file` are helpers defined elsewhere in this module --
        confirm against the full file.
        """
        # Stage a fake icon file and an HTML page whose <img src> points at it.
        rand_icon = _create_icon(test_dir)
        _create_html_file(test_dir, ICON_HTML, icon_src=True)
        # Exercise the scraper: it should store the icon under ICON_SUBDIR.
        play_scraper.get_app_icon(APP_ID, ICON_SUBDIR)
        read_icon = icon_dir / ICON_SUBDIR / ICON_NAME
        assert read_icon.exists()
        # The downloaded icon must match the staged one byte-for-byte.
        assert read_icon.read_text() == rand_icon.read_text()
6e702ceebd6384acfed75804122d1e9b9864c6c7 | 2,776 | py | Python | add.py | plasticuproject/cert-dbms | 0a8f1d8eb69610fa1c0403c08d3d3ac057e3d698 | [
"MIT"
] | null | null | null | add.py | plasticuproject/cert-dbms | 0a8f1d8eb69610fa1c0403c08d3d3ac057e3d698 | [
"MIT"
] | null | null | null | add.py | plasticuproject/cert-dbms | 0a8f1d8eb69610fa1c0403c08d3d3ac057e3d698 | [
"MIT"
] | 1 | 2020-10-27T12:06:36.000Z | 2020-10-27T12:06:36.000Z | #!/usr/bin/python3
"""add.py"""
from sys import argv
import datetime
import sqlite3
import pathlib
PATH = pathlib.Path.cwd()
HELP_TEXT = '''
Usage: add.py [-h] directory
-h, --help bring up this help message
directory directory with certs to add
'''
def add_certs(cert_dir: str) -> None:
    """Add new certs to database. Initialize database if none exists.

    Scans ``cert_dir`` for ``*.txt`` cert files and inserts one row per
    file into the SQLite database ``<cert_dir>.db``.  Files whose names
    are already present (``id`` is the primary key) are skipped.  A
    summary of added and skipped certs is printed at the end.

    :param cert_dir: directory name (relative to PATH) containing the
        cert files; also used as the database file's base name.
    """
    d_b = cert_dir + '.db'
    needs_init = not (PATH / d_b).is_file()

    # Use one connection for both schema creation and inserts.  The
    # original opened a second connection without closing the first,
    # leaking it; a single connection closed at the end avoids that.
    con = sqlite3.connect(d_b)
    cursor_obj = con.cursor()

    # If DATABASE does not exist, initialize it
    if needs_init:
        cursor_obj.execute(
            'CREATE TABLE certs(id text PRIMARY KEY, date_added text, applied integer, date_applied text, banned integer, banned_date text, required_activation integer, currently_used integer)'
        )
        con.commit()

    # Add new cert file info for all UNIQUE cert files from directory
    added_certs = []
    skipped_certs = []
    add_path = PATH / cert_dir
    for cert_file in add_path.iterdir():
        # Check that file in directory is indeed a cert file and set values
        if cert_file.is_file() and cert_file.suffix == '.txt':  # TODO find file sig
            cert_name = cert_file.name
            added = datetime.datetime.now()
            entities = (cert_name, added, 0, 0, 0, 0)
            # Try to add UNIQUE cert file to DATABASE
            try:
                cursor_obj.execute(
                    'INSERT INTO certs(id, date_added, applied, banned, required_activation, currently_used) VALUES(?, ?, ?, ?, ?, ?)',
                    entities)
                con.commit()
                added_certs.append(cert_name)
            # If cert file is already in DATABASE then skip
            except sqlite3.IntegrityError:
                skipped_certs.append(cert_name)
    con.close()

    # Print output
    if skipped_certs:
        print('\n[*] Already in DATABASE, skipping:\n')
        for _x in skipped_certs:
            print('\t' + _x)
    if added_certs:
        print('\n\n[*] Added to the DATABASE:\n')
        for _x in added_certs:
            print('\t' + _x)
    print(f'\n\n[*] Added: {len(added_certs)}')
    print(f'[*] Skipped {len(skipped_certs)}\n')
if __name__ == '__main__':
    # Check for help flag
    if len(argv) < 2 or argv[1] == '--help' or argv[1] == '-h':
        print(HELP_TEXT)
        quit()
    # Check if directory name is valid, run stuff if so
    if (PATH / argv[1]).is_dir():
        CERT_DIR = argv[1]
        # Strip a trailing slash so "<dir>.db" gets a clean base name.
        if CERT_DIR[-1] == '/':
            CERT_DIR = CERT_DIR[:-1]
        try:
            add_certs(CERT_DIR)
        # Allow Ctrl-C to abort cleanly without a traceback.
        except KeyboardInterrupt:
            quit()
    else:
        print(f'\n[*] {argv[1]} not a valid directory\n')
| 30.844444 | 193 | 0.583934 |
6e710c139901b3edb6aaa6a1f60ac54de8da8353 | 209 | py | Python | mrq_monitor.py | HyokaChen/violet | b89ddb4f909c2a40e76d89b665949e55086a7a80 | [
"Apache-2.0"
] | 1 | 2020-07-29T15:49:35.000Z | 2020-07-29T15:49:35.000Z | mrq_monitor.py | HyokaChen/violet | b89ddb4f909c2a40e76d89b665949e55086a7a80 | [
"Apache-2.0"
] | 1 | 2019-12-19T10:19:57.000Z | 2019-12-19T11:15:28.000Z | mrq_monitor.py | EmptyChan/violet | b89ddb4f909c2a40e76d89b665949e55086a7a80 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created with IntelliJ IDEA.
Description:
User: jinhuichen
Date: 3/28/2018 4:17 PM
Description:
"""
from mrq.dashboard.app import main
if __name__ == '__main__':
main() | 16.076923 | 34 | 0.650718 |
6e734b51dd3ec79fecc1a0e0800072ebad29c909 | 556 | py | Python | lang/string/reverse-words.py | joez/letspy | 9f653bc0071821fdb49da8c19787dc7e12921457 | [
"Apache-2.0"
] | null | null | null | lang/string/reverse-words.py | joez/letspy | 9f653bc0071821fdb49da8c19787dc7e12921457 | [
"Apache-2.0"
] | null | null | null | lang/string/reverse-words.py | joez/letspy | 9f653bc0071821fdb49da8c19787dc7e12921457 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
if __name__ == '__main__':
s = input()
for f in (reverse_words, reverse_words_ext):
print(f(s))
| 19.857143 | 50 | 0.491007 |
6e74495ac01d11fb500db642fc48819334b6af0a | 140 | py | Python | k8s/the-project/kubeless/ok-func.py | cjimti/mk | b303e147da77776baf5fee337e356ebeccbe2c01 | [
"MIT"
] | 1 | 2019-04-18T09:52:48.000Z | 2019-04-18T09:52:48.000Z | k8s/the-project/kubeless/ok-func.py | cjimti/mk | b303e147da77776baf5fee337e356ebeccbe2c01 | [
"MIT"
] | null | null | null | k8s/the-project/kubeless/ok-func.py | cjimti/mk | b303e147da77776baf5fee337e356ebeccbe2c01 | [
"MIT"
] | null | null | null | import requests
| 15.555556 | 43 | 0.65 |
6e74bf0ffc1a010178cf010d5be1824b1235b7ba | 11,166 | py | Python | python-scripts/gt_generate_python_curve.py | TrevisanGMW/maya | 4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92 | [
"MIT"
] | 26 | 2020-11-16T12:49:05.000Z | 2022-03-09T20:39:22.000Z | python-scripts/gt_generate_python_curve.py | TrevisanGMW/maya | 4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92 | [
"MIT"
] | 47 | 2020-11-08T23:35:49.000Z | 2022-03-10T03:43:00.000Z | python-scripts/gt_generate_python_curve.py | TrevisanGMW/maya | 4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92 | [
"MIT"
] | 5 | 2021-01-27T06:10:34.000Z | 2021-10-30T23:29:44.000Z | """
Python Curve Generator
@Guilherme Trevisan - github.com/TrevisanGMW/gt-tools - 2020-01-02
1.1 - 2020-01-03
Minor patch adjustments to the script
1.2 - 2020-06-07
Fixed random window widthHeight issue.
Updated naming convention to make it clearer. (PEP8)
Added length checker for selection before running.
1.3 - 2020-06-17
Changed UI
Added help menu
Added icon
1.4 - 2020-06-27
No longer failing to generate curves with non-unique names
Tweaked the color and text for the title and help menu
1.5 - 2021-01-26
Fixed way the curve is generated to account for closed and opened curves
1.6 - 2021-05-12
Made script compatible with Python 3 (Maya 2022+)
"""
import maya.cmds as cmds
import sys
from decimal import *
from maya import OpenMayaUI as omui
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide.QtGui import QIcon, QWidget
# Script Name
script_name = "GT - Generate Python Curve"
# Version:
script_version = "1.6"
#Python Version
python_version = sys.version_info.major
# Default Settings
close_curve = False
add_import = False
# Function for the "Run Code" button
# Main Form ============================================================================
# Creates Help GUI
#Build UI
if __name__ == '__main__':
build_gui_py_curve() | 40.901099 | 129 | 0.5729 |
6e75ab3bf35f32714181bf627668b80eaa462378 | 1,766 | py | Python | client/core/scene/summary.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | [
"MIT"
] | null | null | null | client/core/scene/summary.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | [
"MIT"
] | null | null | null | client/core/scene/summary.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | [
"MIT"
] | null | null | null | import pygame
from config import Config
from core.ui import Table, Button
from core.scene import Scene
from core.manager import SceneManager
from core.scene.preload import Preload | 34.627451 | 109 | 0.656285 |
6e7654580b77f1dbecf04a37ead830e9b06ecf31 | 198 | py | Python | mwptoolkit/module/Encoder/__init__.py | ShubhamAnandJain/MWP-CS229 | ce86233504fdb37e104a3944fd81d4606fbfa621 | [
"MIT"
] | 71 | 2021-03-08T06:06:15.000Z | 2022-03-30T11:59:37.000Z | mwptoolkit/module/Encoder/__init__.py | ShubhamAnandJain/MWP-CS229 | ce86233504fdb37e104a3944fd81d4606fbfa621 | [
"MIT"
] | 13 | 2021-09-07T12:38:23.000Z | 2022-03-22T15:08:16.000Z | mwptoolkit/module/Encoder/__init__.py | ShubhamAnandJain/MWP-CS229 | ce86233504fdb37e104a3944fd81d4606fbfa621 | [
"MIT"
] | 21 | 2021-02-16T07:46:36.000Z | 2022-03-23T13:41:33.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from mwptoolkit.module.Encoder import graph_based_encoder,rnn_encoder,transformer_encoder | 49.5 | 89 | 0.90404 |
6e78083845e016661893639e08ffab0d50cff621 | 546 | py | Python | src/python/intensity/components/shutdown_if_empty.py | kripken/intensityengine | 9ae352b4f526ecb180004ae4968db7f64f140762 | [
"MIT"
] | 31 | 2015-01-18T20:27:31.000Z | 2021-07-03T03:58:47.000Z | src/python/intensity/components/shutdown_if_empty.py | JamesLinus/intensityengine | 9ae352b4f526ecb180004ae4968db7f64f140762 | [
"MIT"
] | 4 | 2015-07-05T21:09:37.000Z | 2019-09-06T14:34:59.000Z | src/python/intensity/components/shutdown_if_empty.py | JamesLinus/intensityengine | 9ae352b4f526ecb180004ae4968db7f64f140762 | [
"MIT"
] | 11 | 2015-02-03T19:24:10.000Z | 2019-09-20T10:59:50.000Z |
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
from intensity.signals import client_connect, client_disconnect
from intensity.base import quit
client_connect.connect(add, weak=False)
client_disconnect.connect(subtract, weak=False)
| 22.75 | 110 | 0.717949 |
6e780b142bddebcec890df30277381a71e204488 | 694 | py | Python | pyforms/utils/timeit.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | [
"MIT"
] | null | null | null | pyforms/utils/timeit.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | [
"MIT"
] | null | null | null | pyforms/utils/timeit.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"
import time
from datetime import datetime, timedelta | 27.76 | 156 | 0.674352 |
6e7a4b454a8651618254290e5f7ef6b4e1cd99a9 | 1,388 | py | Python | cogs/botinfo.py | MM-coder/salbot-rewrite | 322c34ba85a2c852e02cd3c183d5a7a4a077ff6f | [
"Apache-2.0"
] | 1 | 2020-08-17T05:14:58.000Z | 2020-08-17T05:14:58.000Z | cogs/botinfo.py | MM-coder/salbot-rewrite | 322c34ba85a2c852e02cd3c183d5a7a4a077ff6f | [
"Apache-2.0"
] | null | null | null | cogs/botinfo.py | MM-coder/salbot-rewrite | 322c34ba85a2c852e02cd3c183d5a7a4a077ff6f | [
"Apache-2.0"
] | 1 | 2020-08-17T16:57:30.000Z | 2020-08-17T16:57:30.000Z | """
Created by vcokltfre at 2020-07-08
"""
import json
import logging
import time
from datetime import datetime
import discord
from discord.ext import commands
from discord.ext.commands import has_any_role
| 28.326531 | 119 | 0.591499 |
6e7b6d33ac9f184e61e6b426b75d7acfe7a99f1e | 6,486 | py | Python | ninja_extra/pagination.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | [
"MIT"
] | 43 | 2021-09-09T14:20:59.000Z | 2022-03-28T00:38:52.000Z | ninja_extra/pagination.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | [
"MIT"
] | 6 | 2022-01-04T10:53:11.000Z | 2022-03-28T19:53:46.000Z | ninja_extra/pagination.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | [
"MIT"
] | null | null | null | import inspect
import logging
from collections import OrderedDict
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union, cast, overload
from django.core.paginator import InvalidPage, Page, Paginator
from django.db.models import QuerySet
from django.http import HttpRequest
from ninja import Schema
from ninja.constants import NOT_SET
from ninja.pagination import LimitOffsetPagination, PageNumberPagination, PaginationBase
from ninja.signature import has_kwargs
from ninja.types import DictStrAny
from pydantic import Field
from ninja_extra.conf import settings
from ninja_extra.exceptions import NotFound
from ninja_extra.schemas import PaginatedResponseSchema
from ninja_extra.urls import remove_query_param, replace_query_param
logger = logging.getLogger()
if TYPE_CHECKING:
from .controllers import ControllerBase # pragma: no cover
__all__ = [
"PageNumberPagination",
"PageNumberPaginationExtra",
"PaginationBase",
"LimitOffsetPagination",
"paginate",
"PaginatedResponseSchema",
]
def _positive_int(
integer_string: Union[str, int], strict: bool = False, cutoff: Optional[int] = None
) -> int:
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
return min(ret, cutoff)
return ret
def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    """Pagination decorator for view functions.

    Supports bare use (``@paginate`` directly on a view) and parameterized
    use (``@paginate(SomePaginationClass, ...)``); when no class is given,
    ``settings.PAGINATION_CLASS`` is used.
    """
    isfunction = inspect.isfunction(func_or_pgn_class)
    isnotset = func_or_pgn_class == NOT_SET
    pagination_class: Type[PaginationBase] = settings.PAGINATION_CLASS
    # Bare usage: the "argument" is actually the view function itself.
    if isfunction:
        return _inject_pagination(func_or_pgn_class, pagination_class)
    # Parameterized usage: the first argument is a pagination class.
    if not isnotset:
        pagination_class = func_or_pgn_class
    # NOTE(review): `wrapper` is not defined in this excerpt -- an inner
    # decorator function (closing over pagination_class/paginator_params)
    # appears to have been elided; confirm against the full source.
    return wrapper
def _inject_pagination(
    func: Callable[..., Any],
    paginator_class: Type[PaginationBase],
    **paginator_params: Any,
) -> Callable[..., Any]:
    """Wrap view *func* so its result is paginated by *paginator_class*.

    Instantiates the paginator with *paginator_params* and registers its
    input schema with django-ninja via ``_ninja_contribute_args``.
    """
    # Record whether the view can actually receive pagination kwargs.
    func.has_kwargs = True  # type: ignore
    if not has_kwargs(func):
        func.has_kwargs = False  # type: ignore
        logger.debug(
            f"function {func.__name__} should have **kwargs if you want to use pagination parameters"
        )
    paginator: PaginationBase = paginator_class(**paginator_params)
    paginator_kwargs_name = "pagination"
    # NOTE(review): `view_with_pagination` is not defined in this excerpt --
    # the wrapping view function appears to have been elided from this
    # copy; confirm against the full source.
    view_with_pagination._ninja_contribute_args = [  # type: ignore
        (
            paginator_kwargs_name,
            paginator.Input,
            paginator.InputSource,
        ),
    ]
    return view_with_pagination
| 31.333333 | 101 | 0.665433 |
6e7bea4cb2b85ac4aa392fccc69253e8cb2356b9 | 547 | py | Python | text-boxes/test-textbox01.py | rajorshi-mukherjee/gui-python | 356eef26975e63de48b441d336d75a1f9c232cf3 | [
"MIT"
] | null | null | null | text-boxes/test-textbox01.py | rajorshi-mukherjee/gui-python | 356eef26975e63de48b441d336d75a1f9c232cf3 | [
"MIT"
] | 3 | 2022-01-02T18:04:24.000Z | 2022-01-12T16:35:31.000Z | text-boxes/test-textbox01.py | rajorshi-mukherjee/gui-python | 356eef26975e63de48b441d336d75a1f9c232cf3 | [
"MIT"
] | null | null | null | # !/usr/bin/python3
from tkinter import *
top = Tk()
top.geometry("400x250")
name = Label(top, text = "Name").place(x = 30,y = 50)
email = Label(top, text = "Email").place(x = 30, y = 90)
password = Label(top, text = "Password").place(x = 30, y = 130)
sbmitbtn = Button(top, text = "Submit",activebackground = "pink", activeforeground = "blue").place(x = 30, y = 170)
e1 = Entry(top).place(x = 80, y = 50)
e2 = Entry(top).place(x = 80, y = 90)
e3 = Entry(top, show="*").place(x = 95, y = 130)
top.mainloop() | 30.388889 | 117 | 0.575868 |
6e7c1a4dd0214c41c2785c1779862d06bb157d94 | 873 | py | Python | server/yafa/migrations/0002_auto_20160606_2216.py | mrmonkington/yafa | d15ba1fdaaa046e3bc07a7a44fb61213d686bb7d | [
"MIT"
] | null | null | null | server/yafa/migrations/0002_auto_20160606_2216.py | mrmonkington/yafa | d15ba1fdaaa046e3bc07a7a44fb61213d686bb7d | [
"MIT"
] | 13 | 2016-08-10T19:22:35.000Z | 2021-06-10T18:53:01.000Z | server/yafa/migrations/0002_auto_20160606_2216.py | mrmonkington/yafa | d15ba1fdaaa046e3bc07a7a44fb61213d686bb7d | [
"MIT"
] | 2 | 2016-06-23T09:02:20.000Z | 2021-03-22T11:39:20.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 22:16
from __future__ import unicode_literals
from django.db import migrations, models
| 24.25 | 63 | 0.54181 |
6e7d261c65a6ddf389725d10b7241f84b3620572 | 501 | py | Python | toys/urls.py | julesc00/restful | 11b5312caf4affeaa06e3ceb5b86a7c73357eed1 | [
"MIT"
] | null | null | null | toys/urls.py | julesc00/restful | 11b5312caf4affeaa06e3ceb5b86a7c73357eed1 | [
"MIT"
] | null | null | null | toys/urls.py | julesc00/restful | 11b5312caf4affeaa06e3ceb5b86a7c73357eed1 | [
"MIT"
] | null | null | null | from django.urls import path
from toys.views import (toy_list_view, toy_detail_view, toy_sql_view, toy_raw_sql_view,
toy_aggregate_view)
app_name = "toys"
urlpatterns = [
path("toys/", toy_list_view, name="toys_list"),
path("toys_sql/", toy_sql_view, name="toys_sql_list"),
path("toys/count/", toy_aggregate_view, name="toys_count"),
path("toys_raw/", toy_raw_sql_view, name="toys_raw_list"),
path("toys/<int:pk>/", toy_detail_view, name="toy_detail"),
]
| 35.785714 | 87 | 0.692615 |
6e7e694936dd85ec6e3ce90826c00f74519f89dc | 5,590 | py | Python | predict_image.py | sempwn/kaggle-cats-v-dogs | 0b0e50ca5208248d18b31bfdd456cdb6401060d7 | [
"MIT"
] | null | null | null | predict_image.py | sempwn/kaggle-cats-v-dogs | 0b0e50ca5208248d18b31bfdd456cdb6401060d7 | [
"MIT"
] | null | null | null | predict_image.py | sempwn/kaggle-cats-v-dogs | 0b0e50ca5208248d18b31bfdd456cdb6401060d7 | [
"MIT"
] | null | null | null | '''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
It uses data that can be downloaded at:
https://www.kaggle.com/c/dogs-vs-cats/data
In our setup, we:
- created a data/ folder
- created train/ and validation/ subfolders inside data/
- created cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-999 in data/train/cats
- put the cat pictures index 1000-1400 in data/validation/cats
- put the dogs pictures index 12500-13499 in data/train/dogs
- put the dog pictures index 13500-13900 in data/validation/dogs
So that we have 1000 training examples for each class, and 400 validation examples for each class.
In summary, this is our directory structure:
```
data/
train/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
validation/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
```
'''
import os
import h5py
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image as image_utils
from keras import optimizers
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense
#image input utils
from Tkinter import Tk
from tkFileDialog import askopenfilename
# path to the model weights files.
weights_path = 'data/models/vgg16_weights.h5'
top_model_weights_path = 'data/models/bottleneck_fc_model.h5'
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
nb_epoch = 50
# build the VGG16 network
# Input shape (3, img_width, img_height): 3 leads, i.e. channels-first
# ordering (Theano-style backend -- confirm keras image_dim_ordering).
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
# Block 1: two 64-filter 3x3 convs + max-pool.
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 2: two 128-filter convs.
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 3: three 256-filter convs.
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 4: three 512-filter convs.
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 5: three 512-filter convs.
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(weights_path)
# Copy weights layer by layer from the HDF5 file into the model, stopping
# before the savefile's fully-connected layers (our model has none of them).
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        # we don't look at the last (fully-connected) layers in the savefile
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
# build a classifier model to put on top of the convolutional model
# (Flatten -> Dense 256 -> single sigmoid output for binary cat/dog).
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.0)) #Should have 0 dropout for predicition. But still need model structure so set to 0.
top_model.add(Dense(1, activation='sigmoid'))
print('[INFO] loading weights. May take a while...')
# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
model.add(top_model)
# TODO: create test_data in appropriate format.
print("[INFO] loading and preprocessing image...")
Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename()  # show an "Open" dialog box and return the path to the selected file
image = image_utils.load_img(filename, target_size=(img_width, img_height))
image = image_utils.img_to_array(image) #array should be (3,150,150)
image = np.expand_dims(image, axis=0) #expand to shape (1,3,150, 150)
# predict() yields a (1, 1) array holding P(dog) from the single sigmoid
# output unit; P(cat) is its complement.
pDOG = model.predict(image)[0][0]
pCAT = 1. - pDOG
# Use the print *function* form for consistency with every other print in
# this file (the original line used a Python 2 print statement); with a
# single argument this behaves identically under Python 2 as well.
print('Image {} percent dog and {} percent cat'.format(pDOG*100.,pCAT*100.))
| 38.287671 | 111 | 0.719499 |
6e7f3a4c08faec09d89aa387dcfdf45492ab2264 | 163 | py | Python | ultimate-utils-proj-src/uutils/torch_uu/training/meta_training.py | brando90/ultimate-utils | 9b7ca2e9d330333c4e49722d0708d65b22ed173a | [
"MIT"
] | 5 | 2021-03-13T16:07:26.000Z | 2021-09-09T17:00:36.000Z | ultimate-utils-proj-src/uutils/torch_uu/training/meta_training.py | brando90/ultimate-utils | 9b7ca2e9d330333c4e49722d0708d65b22ed173a | [
"MIT"
] | 8 | 2021-03-09T21:52:09.000Z | 2021-12-02T17:23:33.000Z | ultimate-utils-proj-src/uutils/torch_uu/training/meta_training.py | brando90/ultimate-utils | 9b7ca2e9d330333c4e49722d0708d65b22ed173a | [
"MIT"
] | 5 | 2021-03-24T20:38:43.000Z | 2022-03-17T07:54:12.000Z | """
TODO: Once I finish the d zero and high paper, I will port the code here.
TODO: also put the epochs training, for the ml vs maml paper with synthetic data.
""" | 40.75 | 81 | 0.730061 |
6e7ff5caf482e80185273f9434f18cc9786fbe99 | 692 | py | Python | setup.py | ellwise/kedro-light | 8f5a05d880f3ded23b024d5db72b5fc615e75230 | [
"MIT"
] | 2 | 2021-10-16T12:19:50.000Z | 2022-01-20T16:50:14.000Z | setup.py | ellwise/kedro-light | 8f5a05d880f3ded23b024d5db72b5fc615e75230 | [
"MIT"
] | null | null | null | setup.py | ellwise/kedro-light | 8f5a05d880f3ded23b024d5db72b5fc615e75230 | [
"MIT"
] | null | null | null | from setuptools import setup
from os import path
# read the contents of your README file
curr_dir = path.abspath(path.dirname(__file__))
with open(path.join(curr_dir, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="kedro-light",
version="0.1",
description="A lightweight interface to Kedro",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ellwise/naive-bayes-explainer",
author="Elliott Wise",
author_email="ell.wise@gmail.com",
license="MIT",
packages=["kedro_light"],
install_requires=["kedro"],
include_package_data=True,
zip_safe=False,
)
| 27.68 | 67 | 0.710983 |
6e812cd9d9f3ad6325c8b7be7fb0c2f7d95ff84f | 1,217 | py | Python | app.py | mh-github/mh-wtgw | 0e8d9b622954e14d1e24fda6fc6a4e63af2cd822 | [
"CC0-1.0"
] | null | null | null | app.py | mh-github/mh-wtgw | 0e8d9b622954e14d1e24fda6fc6a4e63af2cd822 | [
"CC0-1.0"
] | null | null | null | app.py | mh-github/mh-wtgw | 0e8d9b622954e14d1e24fda6fc6a4e63af2cd822 | [
"CC0-1.0"
] | null | null | null | import random
from flask import Flask, request, render_template, jsonify
app = Flask(__name__)
# Lines of data.txt, loaded once at import time; presumably served by route
# handlers defined elsewhere in this file (not visible in this chunk).
data_list = []
with open('data.txt', 'r') as data_file:
    data_list = data_file.readlines()
if __name__ == "__main__":
    # Listen on all interfaces (Flask's default port, 5000).
    app.run(host='0.0.0.0')
6e81c177879d88e6b010319496c61e52cdb196f1 | 13,606 | py | Python | imported_files/plotting_cswl05.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | imported_files/plotting_cswl05.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | imported_files/plotting_cswl05.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Plotting.py for notebook 05_Preliminary_comparison_of_simulations_AGN_fraction_with_data
This python file contains all the functions used for plotting graphs and maps in the 2nd notebook (.ipynb) of the repository: 05. Preliminary comparison of the MM between simulation and data
Script written by: Soumya Shreeram
Project supervised by Johan Comparat
Date created: 27th April 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
# scipy modules
from scipy.spatial import KDTree
from scipy.interpolate import interp1d
import os
import importlib
# plotting imports
import matplotlib
from mpl_toolkits import axes_grid1
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import seaborn as sns
import Agn_incidence_from_Major_Mergers as aimm
import Comparison_simulation_with_literature_data as cswl
from scipy.stats import norm
def setLabel(ax, xlabel, ylabel, title='', xlim='default', ylim='default', legend=True):
    """
    Apply the standard plot cosmetics used throughout this notebook to *ax*.
    @param ax :: axes to decorate
    @param xlabel, ylabel :: axis labels
    @param title :: plot title (fontsize 18)
    @param xlim, ylim :: axis limits; the sentinel 'default' keeps matplotlib's
                         automatic limits
    @param legend :: when True, draw a frameless legend with enlarged markers
    """
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # only override the automatic limits when explicit ones were supplied
    for limits, apply_limits in ((xlim, ax.set_xlim), (ylim, ax.set_ylim)):
        if limits != 'default':
            apply_limits(limits)
    if legend:
        leg = ax.legend(loc='best', fontsize=14, frameon=False)
        # enlarge the legend markers so they stay readable at small figure sizes
        for handle in leg.legendHandles:
            handle._legmarker.set_markersize(12)
    ax.grid(False)
    ax.set_title(title, fontsize=18)
    return
def plotScaleMMdistribution(halo_m_scale_arr_all_r, cosmo, dt_m_arr):
    """
    Function plots the number of objects in pairs as a function of the scale of last MM
    --> the cuts on delta t_mm are overplotted to see the selection criterion
    @param halo_m_scale_arr_all_r :: per-separation-bin arrays of last-MM scale factors
    @param cosmo :: astropy cosmology used to convert lookback times to scale factors
    @param dt_m_arr :: t_MM (lookback time) cuts, in Gyr
    """
    fig, ax = plt.subplots(1,1,figsize=(7,6))
    # histogram the last-major-merger scale factor for every separation bin
    bins = 20
    hist_all_r = np.zeros((0, bins))
    for i in range(len(halo_m_scale_arr_all_r)):
        hist_counts, a = np.histogram(halo_m_scale_arr_all_r[i], bins=bins)
        hist_all_r = np.append(hist_all_r, [hist_counts], axis=0)
        # `a` holds bins+1 edges; a[1:] pairs each count with its upper edge
        ax.plot(a[1:], hist_counts, '--', marker = 'd', color='k')
    # convert the t_MM cuts to scale factors and overplot them as vertical lines
    scale_mm = cswl.tmmToScale(cosmo, dt_m_arr)
    pal1 = sns.color_palette("Spectral", len(scale_mm)+1).as_hex()
    for j, l in enumerate(scale_mm):
        ax.vlines(l, np.min(hist_all_r), np.max(hist_all_r), colors=pal1[j], label=r'$t_{\rm MM}$ = %.1f Gyr'%dt_m_arr[j])
    setLabel(ax, r'Scale factor, $a$', r'Counts', '', 'default',[np.min(hist_all_r), np.max(hist_all_r)], legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', frameon=False)
    ax.set_yscale('log')
    return
def plotNpSep(ax, hd_z_halo, pairs_all, color, label, mec, errorbars = True):
    """
    Function plots the n_p as a function of separation
    @param ax :: matplotlib axes to draw on
    @param hd_z_halo :: halo catalog used to normalise the pair counts
    @param pairs_all :: pair counts per separation bin
    @param color, mec :: marker face / edge colours
    @param label :: legend label for this data set
    @param errorbars :: if True, overplot capped error bars
    Returns :: (ax, n_pairs, n_pairs_err); n_pairs_err stays in its original
    (unconverted) form when errorbars=False
    """
    pairs_all = np.array(pairs_all)
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()
    # get number density of pairs with and without selection cuts
    n_pairs, n_pairs_err = cswl.nPairsToFracPairs(hd_z_halo, pairs_all)
    # changing all unit to kpc
    r_p_kpc, n_pairs = 1e3*r_p[1:len(n_pairs)+1], n_pairs
    # plotting the results
    ax.plot( r_p_kpc , n_pairs, 'd', mec = mec, ms = 10, color=color, label=label)
    # errorbars
    if errorbars:
        n_pairs_err = np.array(n_pairs_err)
        ax.errorbar(r_p_kpc , np.array(n_pairs), yerr=n_pairs_err, ecolor=mec, fmt='none', capsize=4.5)
    return ax, n_pairs, n_pairs_err
def plotFracNdensityPairs(hd_z_halo, pairs_all, pairs_mm_dv_all, pairs_selected_all, plot_selected_pairs=True):
    """
    Function to plot the fractional number density of pairs for different selection criteria
    @param hd_z_halo :: halo catalog used to normalise the pair counts
    @param pairs_all :: pair counts with no selection criterion
    @param pairs_mm_dv_all :: pair counts after the mass-ratio and velocity cuts
    @param pairs_selected_all :: pair counts after the t_MM and X_off selection
    @param plot_selected_pairs :: if False, the selected-pairs curve is skipped
                                  and its entries in the returned arrays are None
    Returns :: (pairs_arr, pairs_arr_err, ax)
    """
    flare = sns.color_palette("pastel", 5).as_hex()
    mec = ['k', '#05ad2c', '#db5807', '#a30a26', 'b']
    fig, ax = plt.subplots(1,1,figsize=(5,4))
    # plotting the cases with the different cuts
    ax, n_pairs, n_pairs_err = plotNpSep(ax, hd_z_halo, pairs_all[1], 'k', r' $\mathbf{\Gamma}_{m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off}}(r)\ $', mec[0])
    ax, n_mm_dv_pairs, n_pairs_mm_dv_err = plotNpSep(ax, hd_z_halo, pairs_mm_dv_all[1], flare[3], r'$\mathbf{\Gamma}_{t_{\rm MM};\ \tilde{X}_{\rm off}}(r|\ m;\ \Delta v)$', mec[3])
    # BUGFIX: these names were previously only bound inside the `if` branch, so
    # calling with plot_selected_pairs=False raised a NameError at the array
    # construction below.
    n_selected_pairs, n_selected_err = None, None
    if plot_selected_pairs:
        ax, n_selected_pairs, n_selected_err = plotNpSep(ax, hd_z_halo, pairs_selected_all[1], flare[2], r'$\mathbf{\Gamma}(r|\ m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off} )$'+'\n'+r'$t_{\rm MM} \in [0.6-1.2]$ Gyr, $\tilde{X}_{\rm off} \in [0.17, 0.54]$', mec[1])
    ax.set_yscale("log")
    setLabel(ax, r'Separation, $r$ [kpc]', r'$\mathbf{\Gamma}(r)$ [Mpc$^{-3}$]', '', 'default', 'default', legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=15, frameon=False)
    pairs_arr = np.array([n_pairs, n_mm_dv_pairs, n_selected_pairs], dtype=object)
    pairs_arr_err = np.array([n_pairs_err, n_pairs_mm_dv_err, n_selected_err], dtype=object)
    return pairs_arr, pairs_arr_err, ax
def plotCumulativeDist(vol, dt_m_arr, pairs_mm_all, pairs_mm_dv_all, n_pairs_mm_dt_all, n_pairs_mm_dv_dt_all, param = 't_mm'):
    """
    Function to plot the cumulative number of pairs for the total vol (<z=2) for pairs with dz and mass ratio criteria
    @param vol :: comoving volume used for normalisation (factor 2 below corrects pair double counting)
    @param dt_m_arr :: list of (low, high) bin edges for t_MM (or X_off when param != 't_mm')
    @param param :: 't_mm' selects the t_MM legend labels, anything else the X_off labels
    Returns :: the colour palette used for the binned curves
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, _ = aimm.shellVolume()
    fig, ax = plt.subplots(1,2,figsize=(17,6))
    pal = sns.color_palette("coolwarm", len(dt_m_arr)+1).as_hex()
    # reference curves without any t_MM / X_off criterion
    ax[0].plot( (1e3*r_p[1:]), (pairs_mm_all[1][1:]/(2*vol)), 'X', color='k', label='No criterion')
    ax[1].plot( (1e3*r_p[1:]), (pairs_mm_dv_all[1][1:]/(2*vol)), 'X', color='k', label='No criterion')
    for t_idx in range(len(dt_m_arr)):
        np_mm_dt, np_mm_dv_dt = n_pairs_mm_dt_all[t_idx], n_pairs_mm_dv_dt_all[t_idx]
        if param == 't_mm':
            label = r'$t_{\rm MM} \in$ %.1f-%.1f Gyr'%(dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        else:
            # NOTE(review): the 'Gyr' suffix looks copy-pasted from the t_MM
            # label; X_off is dimensionless -- confirm before publishing figures.
            label = r'$\tilde{X}_{\rm off} \in$ %.1f-%.1f Gyr'%(dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        ax[0].plot( (1e3*r_p[1:]), (np_mm_dt[1:]/(2*vol)), 'kX', label = label, color=pal[t_idx])
        ax[1].plot( (1e3*r_p[1:]), (np_mm_dv_dt[1:]/(2*vol)), 'kX', color=pal[t_idx])
    ax[0].set_yscale('log')
    ax[1].set_yscale('log')
    setLabel(ax[0], r'Separation, $r$ [kpc]', 'Cumulative number of halo pairs\n'+r'[Mpc$^{-3}$]', r'Mass ratio 3:1, $\Delta z_{\rm R, S} < 10^{-3}$', 'default', 'default', legend=False)
    setLabel(ax[1], r'Separation, $r$ [kpc]', r'', 'Mass ratio 3:1', 'default', 'default', legend=False)
    ax[0].legend(bbox_to_anchor=(-0.5, -0.7), loc='lower left', ncol=4, frameon=False)
    return pal
def plotParameterDistributions(xoff_all, string=r'$\tilde{X}_{\rm off}$', xmax=5, filestring='xoff'):
    """
    Function to plot the parameter distribution i.e. SF and PDF
    @param xoff_all :: 1D array of parameter values
    @param string :: axis label; the X_off label additionally selects the SF (vs CDF) view
    @param filestring :: basename of the saved figure under ../figures/
    Returns :: the matplotlib axes
    """
    fig, ax = plt.subplots(1,1,figsize=(7,6))
    # NOTE(review): scipy.stats.norm evaluates a *standard normal* at the
    # sorted data points, not a distribution fitted to xoff_all -- confirm intent.
    sf_xoff = norm.sf(np.sort(xoff_all))
    if string == r'$\tilde{X}_{\rm off}$':
        ax.plot(np.sort(xoff_all), sf_xoff, 'r-', label=r'Survival Function of '+string)
        xmax = np.max(xoff_all)
    else:
        ax.plot(np.sort(xoff_all), 1-sf_xoff, 'r-', label=r'CDF of '+string)
    pdf_xoff = norm.pdf(np.sort(xoff_all))
    ax.plot(np.sort(xoff_all), pdf_xoff, 'k-', label=r'PDF of '+string)
    setLabel(ax, string, 'Distribution of '+string, '', [np.min(xoff_all), xmax], 'default', legend=True)
    plt.savefig('../figures/'+filestring+'_function.png', facecolor='w', edgecolor='w', bbox_inches='tight')
    return ax
def plotContour(u_pix, matrix_2D, xmin=10, xmax=150, ymin=0, ymax=2, ax=None, cmap='YlGnBu'):
    """
    Function plots a contour map
    @param u_pix :: number of pixels in the FOV; a scalar gives a square FOV on
                    [0, u_pix], a sequence (nx, ny) gives a rectangular FOV
                    spanning [xmin, xmax] x [ymin, ymax]
    @param matrix_2D :: 2D array of values to contour
    @param ax :: axes to draw on; a new figure is created when None
    @Returns :: (ax, contour set)
    """
    if ax is None:  # idiom fix: was `ax == None`; identity check is correct for None
        fig, ax = plt.subplots(1,1,figsize=(7,6))
    if isinstance(u_pix, (int, float)):
        X, Y = np.meshgrid(np.linspace(0, u_pix, u_pix), np.linspace(0, u_pix, u_pix))
    elif isinstance(u_pix, (list, tuple, np.ndarray)): # if FOV is a rectangle
        X, Y = np.meshgrid(np.linspace(xmin, xmax, u_pix[0]), np.linspace(ymin, ymax, u_pix[1]))
    else:
        # BUGFIX: an unsupported u_pix previously fell through and raised a
        # confusing NameError on X/Y; fail early with a clear message instead.
        raise TypeError('u_pix must be a scalar or a sequence of two pixel counts')
    plot = ax.contourf(X, Y, matrix_2D, cmap=cmap, origin='image')
    return ax, plot
def plotModelResults(ax, hd_halo, pairs_all, pairs_selected, vol):
    """
    Plots the models generated for bins of Tmm and Xoff
    @param ax :: array of (at least) three matplotlib axes
    @param hd_halo :: halo catalog; its length enters the normalisation
    @param pairs_all, pairs_selected :: pair counts before/after the selection
    @param vol :: comoving volume used for normalisation
    Returns :: the per-bin fraction of selected pairs relative to all pairs
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()
    # plotting the cumulative pairs
    # NOTE: this local `norm` shadows the module-level scipy.stats.norm import
    # (harmless here since the statistical norm is not used in this function).
    norm = vol*len(hd_halo)
    np_all, np_selected = pairs_all/norm, pairs_selected[1]/norm
    ax[0].plot( (1e3*r_p), (np_selected), 'rX', ls = '--', ms=9, label='Selected pairs')
    ax[0].plot( (1e3*r_p), (np_all), 'kX', ls = '--', label = 'All pairs', ms = 9)
    setLabel(ax[0], r'', r'Cumulative $n_{\rm halo\ pairs}}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)
    # plotting the pairs in bins of radius
    np_all_bins, np_all_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_all)
    np_selected_bins, np_selected_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_selected[1])
    # NOTE(review): plotFpairs is not defined in this chunk -- presumably
    # defined elsewhere in this module; verify it exists before running.
    _ = plotFpairs(ax[1], r_p, np_all_bins, np_all_bins_err, label = 'All pairs', color='k')
    _ = plotFpairs(ax[1], r_p, np_selected_bins, np_selected_bins_err, label = 'Selected pairs')
    ax[1].set_yscale('log')
    setLabel(ax[1], r'', r'$n_{\rm halo\ pairs}}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)
    # plotting the pairs in bins with respect to the control
    _ = plotFpairs(ax[2], r_p, np_selected_bins/np_all_bins, np_selected_bins_err, label='wrt all pairs', color='orange')
    setLabel(ax[2], r'Separation, $r$ [kpc]', r'Fraction of pairs, $f_{\rm halo\ pairs}}$ ', '', 'default', 'default', legend=False)
    return np_selected_bins/np_all_bins
6e824c90d5cc97b09e96bf2d9fa8d40cff2f3778 | 1,797 | py | Python | goatools/gosubdag/utils.py | camiloaruiz/goatools | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | [
"BSD-2-Clause"
] | null | null | null | goatools/gosubdag/utils.py | camiloaruiz/goatools | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | [
"BSD-2-Clause"
] | null | null | null | goatools/gosubdag/utils.py | camiloaruiz/goatools | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | [
"BSD-2-Clause"
] | null | null | null | """Small lightweight utilities used frequently in GOATOOLS."""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
def extract_kwargs(args, exp_keys, exp_elems):
    """Return user-specified keyword args in a dictionary and a set (for True/False items)."""
    arg_dict = {}    # keyword arguments that carry a value
    arg_set = set()  # flag-style arguments that are simply "on"
    for key, val in args.items():
        if not val:
            continue  # falsy values are ignored by both categories
        if exp_keys is not None and key in exp_keys:
            arg_dict[key] = val
        elif exp_elems is not None and key in exp_elems:
            arg_set.add(key)
    return {'dict': arg_dict, 'set': arg_set}
def get_kwargs_set(args, exp_elem2dflt):
    """Return user-specified keyword args in a dictionary and a set (for True/False items)."""
    arg_set = set()
    # flags the caller explicitly switched on
    if exp_elem2dflt is not None:
        arg_set.update(key for key, val in args.items() if key in exp_elem2dflt and val)
    # flags that default to on (adding an already-present key is a no-op)
    for key, dflt_on in exp_elem2dflt.items():
        if dflt_on:
            arg_set.add(key)
    return arg_set
def get_kwargs(args, exp_keys, exp_elems):
    """Return user-specified keyword args in a dictionary and a set (for True/False items)."""
    arg_dict = {}
    for key, val in args.items():
        if not val:
            continue  # falsy values never make it into the result
        if exp_keys is not None and key in exp_keys:
            arg_dict[key] = val   # value-carrying argument
        elif exp_elems is not None and key in exp_elems:
            arg_dict[key] = True  # flag-style argument
    return arg_dict
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
| 41.790698 | 94 | 0.668893 |
6e82b8d1720684c00d864fb512765fbff3379ce5 | 309 | py | Python | nicos_ess/ymir/setups/forwarder.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-03-26T10:30:45.000Z | 2021-03-26T10:30:45.000Z | nicos_ess/ymir/setups/forwarder.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_ess/ymir/setups/forwarder.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2020-08-04T18:35:05.000Z | 2021-04-16T11:22:08.000Z | description = 'Monitors the status of the Forwarder'
devices = dict(
    # Single monitor device watching the EPICS-to-Kafka forwarder's status.
    KafkaForwarder=device(
        'nicos_ess.devices.forwarder.EpicsKafkaForwarder',
        description='Monitors the status of the Forwarder',
        # Kafka topic carrying the forwarder's status messages
        statustopic='UTGARD_forwarderStatus',
        brokers=['172.30.242.20:9092']),
)
| 30.9 | 59 | 0.68932 |
6e83557731c2fd4923e8fa481bc7d1048e5e106e | 985 | py | Python | codetree/cli.py | slank/codetree | c1aad059ad31aa1b3cca80a89861c659fce217ac | [
"MIT"
] | 2 | 2015-03-16T11:46:28.000Z | 2017-04-01T13:58:47.000Z | codetree/cli.py | slank/codetree | c1aad059ad31aa1b3cca80a89861c659fce217ac | [
"MIT"
] | null | null | null | codetree/cli.py | slank/codetree | c1aad059ad31aa1b3cca80a89861c659fce217ac | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import logging
from .config import Config
import sys
| 30.78125 | 82 | 0.655838 |
6e85d5b6b7bc4a9b52702783da32bcd642bd2255 | 5,862 | py | Python | notebooks/utils.py | cognoma/ml-workers | 781763c8361d49023222c7349350c3c4774ce4fa | [
"BSD-3-Clause"
] | null | null | null | notebooks/utils.py | cognoma/ml-workers | 781763c8361d49023222c7349350c3c4774ce4fa | [
"BSD-3-Clause"
] | 13 | 2017-01-31T22:54:03.000Z | 2021-02-02T21:42:33.000Z | notebooks/utils.py | cognoma/ml-workers | 781763c8361d49023222c7349350c3c4774ce4fa | [
"BSD-3-Clause"
] | 7 | 2017-06-29T14:19:11.000Z | 2018-04-08T12:06:21.000Z | """
Methods for building Cognoma mutation classifiers
Usage - Import only
"""
import pandas as pd
from sklearn.metrics import roc_curve, roc_auc_score
import plotnine as gg
def get_model_coefficients(classifier, feature_set, covariate_names):
    """
    Extract the feature names and associate them with the coefficient values
    in the final classifier object.
    * Only works for expressions only model with PCA, covariates only model,
    and a combined model
    * Assumes the PCA features come before any covariates that are included
    * Sorts the final dataframe by the absolute value of the coefficients
    Args:
    classifier: the final sklearn classifier object (must expose `coef_`)
    feature_set: string of the model's name {expressions, covariates, full}
    covariate_names: list of the names of the covariate features matrix
    Returns:
    pandas.DataFrame: columns `feature`, `weight`, `abs`, `feature_set`,
    sorted by absolute coefficient value (descending)
    """
    import pandas as pd
    # coef_ is (1, n_features) for a binary classifier; take the single row.
    # (removed an unused `import numpy as np` that the original carried)
    coefs = classifier.coef_[0]
    if feature_set == 'expressions':
        features = ['PCA_%d' % cf for cf in range(len(coefs))]
    elif feature_set == 'covariates':
        features = covariate_names
    else:
        # combined model: PCA components first, then the covariates
        features = ['PCA_%d' % cf for cf in range(len(coefs) - len(covariate_names))]
        features.extend(covariate_names)
    coef_df = pd.DataFrame({'feature': features, 'weight': coefs})
    coef_df['abs'] = coef_df['weight'].abs()
    coef_df = coef_df.sort_values('abs', ascending=False)
    coef_df['feature_set'] = feature_set
    return coef_df
def get_genes_coefficients(pca_object, classifier_object,
                           expression_df, expression_genes_df,
                           num_covariates=None):
    """Identify gene coefficients from classifier after pca.
    Args:
        pca_object: The pca object from running pca on the expression_df
            (must expose `components_`).
        classifier_object: The logistic regression classifier object
            (must expose `coef_`).
        expression_df: The original (pre-pca) expression data frame.
        expression_genes_df: The "expression_genes" dataframe used for gene
            names; indexed by entrez gene id.
        num_covariates: Optional, only needed if PCA was only performed on a
            subset of the features. This should be the number of
            features that PCA was not performed on. This function
            assumes that the covariates features were at the end.
    Returns:
        gene_coefficients_df: A dataframe indexed by entrez gene-ID with
            columns `symbol`, `weight`, `abs` and `description`, sorted by
            absolute value of coefficient (descending).
    """
    # Drop the trailing covariate coefficients when PCA only covered a prefix
    # of the feature vector.
    if num_covariates:
        coefficients = classifier_object.coef_[0][0:-num_covariates]
    else:
        coefficients = classifier_object.coef_[0]
    # Map per-component coefficients back to per-gene weights through the PCA
    # loading matrix: (genes x components) @ (components,) -> (genes,).
    weights = pca_object.components_
    gene_coefficients = weights.T @ coefficients.T
    gene_coefficients_df = pd.DataFrame(gene_coefficients, columns=['weight'])
    gene_coefficients_df.index = expression_df.columns
    gene_coefficients_df.index.name = 'entrez_id'
    # BUGFIX: the original reassigned `expression_genes_df.index` in place,
    # silently mutating the caller's dataframe; align on a stringified copy of
    # the index instead.
    genes_by_str_id = expression_genes_df.set_index(expression_genes_df.index.map(str))
    # Add gene symbol and description, aligned on the string entrez id.
    gene_coefficients_df['symbol'] = genes_by_str_id['symbol']
    gene_coefficients_df['description'] = genes_by_str_id['description']
    # Add absolute value and sort by highest absolute value.
    gene_coefficients_df['abs'] = gene_coefficients_df['weight'].abs()
    gene_coefficients_df.sort_values(by='abs', ascending=False, inplace=True)
    # Reorder columns for readability.
    gene_coefficients_df = gene_coefficients_df[['symbol', 'weight', 'abs',
                                                 'description']]
    return gene_coefficients_df
def select_feature_set_columns(X, feature_set, n_covariates):
    """
    Return the columns of ``X`` belonging to the requested feature set.
    Covariates occupy the first ``n_covariates`` columns of ``X``; expression
    (PCA) features occupy the remainder. Raises ``ValueError`` for any other
    ``feature_set`` name.
    """
    column_slices = {
        'covariates': slice(None, n_covariates),
        'expressions': slice(n_covariates, None),
    }
    if feature_set not in column_slices:
        raise ValueError('feature_set not supported: {}'.format(feature_set))
    return X[:, column_slices[feature_set]]
| 42.788321 | 85 | 0.638519 |
6e85eafe88b2abc4b10f2eb6623ed07ecab6567b | 1,740 | py | Python | docs/fossil-help-cmd.py | smitty1eGH/pyphlogiston | 5134be190cdb31ace04ac5ce2e699a48e54e036e | [
"MIT"
] | null | null | null | docs/fossil-help-cmd.py | smitty1eGH/pyphlogiston | 5134be190cdb31ace04ac5ce2e699a48e54e036e | [
"MIT"
] | null | null | null | docs/fossil-help-cmd.py | smitty1eGH/pyphlogiston | 5134be190cdb31ace04ac5ce2e699a48e54e036e | [
"MIT"
] | null | null | null | from subprocess import run
# The full set of fossil sub-commands whose help text we want to capture,
# in the original order.
cmds = (
    "3-way-merge ci help push stash "
    "add clean hook rebuild status "
    "addremove clone http reconstruct sync "
    "alerts close import redo tag "
    "all co info remote tarball "
    "amend commit init remote-url ticket "
    "annotate configuration interwiki rename timeline "
    "artifact dbstat json reparent tls-config "
    "attachment deconstruct leaves revert touch "
    "backoffice delete login-group rm ui "
    "backup descendants ls rss undo "
    "bisect diff md5sum scrub unpublished "
    "blame export merge search unset "
    "branch extras mv server unversioned "
    "bundle finfo new settings update "
    "cache forget open sha1sum user "
    "cat fts-config pikchr sha3sum uv "
    "cgi gdiff praise shell version "
    "changes git publish sql whatis "
    "chat grep pull sqlar wiki "
    "checkout hash-policy purge sqlite3 zip"
).split()
# Concatenate `fossil help <cmd>` output for every sub-command into one file.
with open("fossile-cmds-help.org", "w") as outfile:
    for cmd in cmds:
        completed = run(
            ["/home/osboxes/src/fossil-snapshot-20210429/fossil", "help", cmd],
            capture_output=True,
        )
        outfile.write(completed.stdout.decode("utf-8"))
| 14.745763 | 77 | 0.440805 |
6e89094dd4c599ed774bc54e2865f3ed2293d233 | 257 | bzl | Python | internal/copts.bzl | zaucy/bzlws | a8f3e4b0bc168059ec92971b1ea7c214db2c5454 | [
"MIT"
] | 4 | 2021-07-21T01:43:50.000Z | 2021-11-18T03:23:18.000Z | internal/copts.bzl | zaucy/bzlws | a8f3e4b0bc168059ec92971b1ea7c214db2c5454 | [
"MIT"
] | null | null | null | internal/copts.bzl | zaucy/bzlws | a8f3e4b0bc168059ec92971b1ea7c214db2c5454 | [
"MIT"
] | 1 | 2022-02-03T07:53:17.000Z | 2022-02-03T07:53:17.000Z | _msvc_copts = ["/std:c++17"]
# MSVC-style flag spelling for clang-cl (matches _msvc_copts above).
_clang_cl_copts = ["/std:c++17"]
# GCC/Clang flag spelling for the same language standard.
_gcc_copts = ["-std=c++17"]
# Select the C++17 flag matching the active C++ toolchain.
copts = select({
    "@bazel_tools//tools/cpp:msvc": _msvc_copts,
    "@bazel_tools//tools/cpp:clang-cl": _clang_cl_copts,
    "//conditions:default": _gcc_copts,
})
| 25.7 | 56 | 0.649805 |
6e8aa5fdaccdc2cf8e079b7b4e650e213a55472a | 1,154 | py | Python | monitor.py | projectsbyif/trillian-demo-audit | 5bb08ae3c359698d8beb47ced39d21e793539396 | [
"Apache-2.0"
] | null | null | null | monitor.py | projectsbyif/trillian-demo-audit | 5bb08ae3c359698d8beb47ced39d21e793539396 | [
"Apache-2.0"
] | 1 | 2021-06-02T02:13:46.000Z | 2021-06-02T02:13:46.000Z | monitor.py | projectsbyif/trillian-demo-audit | 5bb08ae3c359698d8beb47ced39d21e793539396 | [
"Apache-2.0"
] | null | null | null | import logging
import sys
from trillian import TrillianLog
from print_helper import Print
from pprint import pprint
if __name__ == '__main__':
    # Script entry point; `main` is defined elsewhere in this module (not
    # visible in this chunk) and receives the raw argv list.
    main(sys.argv)
| 26.227273 | 70 | 0.707972 |
6e8b21d90213008722c8b31b5d6059ea9e59aa07 | 875 | py | Python | src/geocurrency/units/urls.py | OpenPrunus/geocurrency | 23cc075377d47ac631634cd71fd0e7d6b0a57bad | [
"MIT"
] | 5 | 2021-01-28T16:45:49.000Z | 2021-08-15T06:47:17.000Z | src/geocurrency/units/urls.py | OpenPrunus/geocurrency | 23cc075377d47ac631634cd71fd0e7d6b0a57bad | [
"MIT"
] | 8 | 2020-10-01T15:12:45.000Z | 2021-10-05T14:45:33.000Z | src/geocurrency/units/urls.py | OpenPrunus/geocurrency | 23cc075377d47ac631634cd71fd0e7d6b0a57bad | [
"MIT"
] | 2 | 2021-01-28T16:43:16.000Z | 2021-10-05T14:25:25.000Z | """
Units module URLs
"""
from django.conf.urls import url, include
from django.urls import path
from rest_framework import routers
from .viewsets import UnitSystemViewset, UnitViewset, \
ConvertView, CustomUnitViewSet
from geocurrency.calculations.viewsets import ValidateViewSet, CalculationView
app_name = 'units'
# Router for the unit-system REST endpoints; the regex routes capture the
# unit-system name for per-system unit and custom-unit listings.
router = routers.DefaultRouter()
router.register(r'', UnitSystemViewset, basename='unit_systems')
router.register(r'(?P<system_name>\w+)/units',
                UnitViewset, basename='units')
router.register(r'(?P<system_name>\w+)/custom',
                CustomUnitViewSet, basename='custom')
urlpatterns = [
    path('convert/', ConvertView.as_view()),
    path('<str:unit_system>/formulas/validate/', ValidateViewSet.as_view()),
    path('<str:unit_system>/formulas/calculate/', CalculationView.as_view()),
    # the router's endpoints are mounted last, at the app root
    url(r'^', include(router.urls)),
]
| 31.25 | 78 | 0.726857 |
6e8ba5d71602dfafef83788dd25424753fb81302 | 22 | py | Python | rtk/_reports_/__init__.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | null | null | null | rtk/_reports_/__init__.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | null | null | null | rtk/_reports_/__init__.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | 2 | 2020-04-03T04:14:42.000Z | 2021-02-22T05:30:35.000Z | from tabular import *
| 11 | 21 | 0.772727 |
6e8c6eb072fed5f8eeeb59211773c40061897cf1 | 383 | py | Python | backend/urls.py | starmarek/organize-me-2 | bd9b73d3e6d9a4ebc4cbb8a20c97729bdc6b1377 | [
"MIT"
] | 1 | 2021-03-09T20:49:51.000Z | 2021-03-09T20:49:51.000Z | backend/urls.py | starmarek/organize-me-2 | bd9b73d3e6d9a4ebc4cbb8a20c97729bdc6b1377 | [
"MIT"
] | 7 | 2021-05-08T11:05:15.000Z | 2021-05-08T11:12:27.000Z | backend/urls.py | starmarek/organize-me-2 | bd9b73d3e6d9a4ebc4cbb8a20c97729bdc6b1377 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import include, path
from rest_framework import routers
from .shifts.views import ShiftView
from .workers.views import WorkerView
router = routers.DefaultRouter()
router.register("workers", WorkerView)
router.register("shifts", ShiftView)
urlpatterns = [
    path("admin/", admin.site.urls),
    # REST endpoints (workers/, shifts/) are mounted at the site root.
    path("", include(router.urls)),
]
| 23.9375 | 38 | 0.762402 |
6e8d075cdc130105dd93cb71efed865a3cfcfbc8 | 257 | py | Python | ssk/alpha/api.py | jobliz/solid-state-kinetics | c5767b400b19bd0256c806001664f0b369718bab | [
"MIT"
] | 2 | 2017-03-08T21:32:11.000Z | 2017-07-19T03:27:18.000Z | ssk/alpha/api.py | jobliz/solid-state-kinetics | c5767b400b19bd0256c806001664f0b369718bab | [
"MIT"
] | null | null | null | ssk/alpha/api.py | jobliz/solid-state-kinetics | c5767b400b19bd0256c806001664f0b369718bab | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from scipy import integrate
__all__ = ['area', 'simple']
| 17.133333 | 72 | 0.669261 |
6e8f20f780d781f8cdc23f8a2e62a4a9d0aaaf14 | 6,451 | py | Python | randominette.py | Dutesier/randominette | 2260c0f521d9fcc97f30a8cceb36c94dbee3d474 | [
"MIT"
] | null | null | null | randominette.py | Dutesier/randominette | 2260c0f521d9fcc97f30a8cceb36c94dbee3d474 | [
"MIT"
] | null | null | null | randominette.py | Dutesier/randominette | 2260c0f521d9fcc97f30a8cceb36c94dbee3d474 | [
"MIT"
] | 2 | 2022-01-19T00:27:59.000Z | 2022-01-19T03:46:21.000Z | # **************************************************************************** #
# #
# ::: :::::::: #
# randominette.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: ayalla, sotto & dutesier +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2022/01/13 18:14:29 by dareias- #+# #+# #
# Updated: 2022/01/20 13:10:47 by dareias- ### ########.fr #
# #
# **************************************************************************** #
import requests
import json
import random
import sys
import pprint
from decouple import config
import time
if __name__ == '__main__':
    # Script entry point; `main` is defined elsewhere in this module
    # (not visible in this chunk).
    main()
| 38.39881 | 148 | 0.509068 |
6e918c5815dd4774b7932aa1ec3b9fffa1176641 | 750 | py | Python | newsman/factories.py | acapitanelli/newsman | 3f109f42afe6131383fba1e118b7b9457d76096b | [
"MIT"
] | null | null | null | newsman/factories.py | acapitanelli/newsman | 3f109f42afe6131383fba1e118b7b9457d76096b | [
"MIT"
] | null | null | null | newsman/factories.py | acapitanelli/newsman | 3f109f42afe6131383fba1e118b7b9457d76096b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module provides a way to initialize components for processing
pipeline.
Init functions are stored into a dictionary which can be used by `Pipeline` to
load components on demand.
"""
from .pipeline import Byte2html, Html2text, Html2image, Html2meta, Text2title
def build_factories():
"""Creates default factories for Processor."""
factories = {
'byte2html': lambda config: Byte2html(config),
'html2text': lambda config: Html2text(config),
'html2image': lambda config: Html2image(config),
'html2meta': lambda config: Html2meta(config),
'text2title': lambda config: Text2title(config),
'text2title': lambda config: Text2title(config)
}
return factories
| 32.608696 | 78 | 0.698667 |
6e91c4809b083bd8e190189c7a4286818bc08e69 | 3,673 | py | Python | deprecated.py | thu-fit/DCGAN-anime | da549bd45a6ca3c4c5a8894945d3242c59f823a0 | [
"MIT"
] | null | null | null | deprecated.py | thu-fit/DCGAN-anime | da549bd45a6ca3c4c5a8894945d3242c59f823a0 | [
"MIT"
] | null | null | null | deprecated.py | thu-fit/DCGAN-anime | da549bd45a6ca3c4c5a8894945d3242c59f823a0 | [
"MIT"
] | null | null | null |
  def sampler(self, z, y=None):
    '''Generate an image tensor from latent `z` (and label `y` when y_dim is set).

    Reuses the training-time generator weights (same "generator" variable
    scope) but runs the batch-norm layers with train=False.
    '''
    with tf.variable_scope("generator") as scope:
      # we hope the weights defined in generator to be reused
      scope.reuse_variables()
      if not self.y_dim:
        # unconditional branch: project z, then four stride-2 deconvolutions
        s_h, s_w = self.output_height, self.output_width
        s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
        s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
        s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
        s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
        # project `z` and reshape
        h0 = tf.reshape(
            linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
            [-1, s_h16, s_w16, self.gf_dim * 8])
        h0 = tf.nn.relu(self.g_bn0(h0, train=False))
        # NOTE(review): `batch_size` is a free name here (not self.batch_size);
        # sampler1 below avoids this by reading the size from z -- confirm
        # before resurrecting this deprecated code.
        h1 = deconv2d(h0, [batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')
        h1 = tf.nn.relu(self.g_bn1(h1, train=False))
        h2 = deconv2d(h1, [batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')
        h2 = tf.nn.relu(self.g_bn2(h2, train=False))
        h3 = deconv2d(h2, [batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')
        h3 = tf.nn.relu(self.g_bn3(h3, train=False))
        h4 = deconv2d(h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4')
        return tf.nn.tanh(h4)
      else:
        # conditional branch: the label y is concatenated at every stage
        s_h, s_w = self.output_height, self.output_width
        s_h2, s_h4 = int(s_h/2), int(s_h/4)
        s_w2, s_w4 = int(s_w/2), int(s_w/4)
        # yb = tf.reshape(y, [-1, 1, 1, self.y_dim])
        yb = tf.reshape(y, [batch_size, 1, 1, self.y_dim])
        z = concat([z, y], 1)
        h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))
        h0 = concat([h0, y], 1)
        h1 = tf.nn.relu(self.g_bn1(
            linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin'), train=False))
        h1 = tf.reshape(h1, [batch_size, s_h4, s_w4, self.gf_dim * 2])
        h1 = conv_cond_concat(h1, yb)
        h2 = tf.nn.relu(self.g_bn2(
            deconv2d(h1, [batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2'), train=False))
        h2 = conv_cond_concat(h2, yb)
        return tf.nn.sigmoid(deconv2d(h2, [batch_size, s_h, s_w, self.c_dim], name='g_h3'))
  def sampler1(self, z, y=None, reuse=True):
    '''Generate a given number of samples using z. The first dimension of z is the number of samples'''
    with tf.variable_scope("generator") as scope:
      # we hope the weights defined in generator to be reused
      if reuse:
        scope.reuse_variables()
      # the batch size comes from z itself, unlike sampler() above which
      # relies on a free `batch_size` name
      num_samples = z.get_shape().as_list()[0]
      s_h, s_w = self.output_height, self.output_width
      s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
      s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
      s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
      s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
      # project `z` and reshape
      h0 = tf.reshape(
          linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
          [-1, s_h16, s_w16, self.gf_dim * 8])
      h0 = tf.nn.relu(self.g_bn0(h0, train=False))
      # four stride-2 deconvolutions up to the output resolution
      h1 = deconv2d(h0, [num_samples, s_h8, s_w8, self.gf_dim*4], name='g_h1')
      h1 = tf.nn.relu(self.g_bn1(h1, train=False))
      h2 = deconv2d(h1, [num_samples, s_h4, s_w4, self.gf_dim*2], name='g_h2')
      h2 = tf.nn.relu(self.g_bn2(h2, train=False))
      h3 = deconv2d(h2, [num_samples, s_h2, s_w2, self.gf_dim*1], name='g_h3')
      h3 = tf.nn.relu(self.g_bn3(h3, train=False))
      h4 = deconv2d(h3, [num_samples, s_h, s_w, self.c_dim], name='g_h4')
      return tf.nn.tanh(h4)
| 39.494624 | 103 | 0.613395 |
6e922f24956d34276912f3a429414da7e22eb9ef | 14,915 | py | Python | Prioritize/get_HPO_similarity_score.py | mbosio85/ediva | c0a1aa4dd8951fa659483164c3706fb9374beb95 | [
"MIT"
] | 1 | 2021-02-23T07:42:42.000Z | 2021-02-23T07:42:42.000Z | Prioritize/get_HPO_similarity_score.py | mbosio85/ediva | c0a1aa4dd8951fa659483164c3706fb9374beb95 | [
"MIT"
] | null | null | null | Prioritize/get_HPO_similarity_score.py | mbosio85/ediva | c0a1aa4dd8951fa659483164c3706fb9374beb95 | [
"MIT"
] | 1 | 2019-09-26T01:21:06.000Z | 2019-09-26T01:21:06.000Z | ## how we measure the similarity between two lists w/ IC per each node
## we have a DAG strucutre
## goal is for each Gene !! output a 'semantic distance'
# based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2756558/ [but different]
# with this two equal nodes will have distance '0'
# maximum distance is -2log(1/tot) ~~ 25
import networkx as nx
import cPickle as pickle
import numpy as np
import math
import random
| 32.852423 | 158 | 0.578947 |
6e942a1e8c0fd4f03d779fd36629d8f97651ff14 | 364 | py | Python | tests/tfgraph/utils/test_datasets.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 4 | 2017-07-23T13:48:35.000Z | 2021-12-03T18:11:50.000Z | tests/tfgraph/utils/test_datasets.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 21 | 2017-07-23T13:15:20.000Z | 2020-09-28T02:13:11.000Z | tests/tfgraph/utils/test_datasets.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 1 | 2017-07-28T10:28:04.000Z | 2017-07-28T10:28:04.000Z | import tfgraph
| 24.266667 | 81 | 0.653846 |
6e94f020370af25596b5a73fe263fae2cf996278 | 668 | py | Python | deploy/virenv/lib/python2.7/site-packages/haystack/outputters/__init__.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | deploy/virenv/lib/python2.7/site-packages/haystack/outputters/__init__.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | deploy/virenv/lib/python2.7/site-packages/haystack/outputters/__init__.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:mod:`haystack.outputs` -- classes that create an output
==============================================================================
"""
from haystack import utils
| 27.833333 | 85 | 0.591317 |
6e9649858a66821226a8387a5c2ae25467b9d1c9 | 631 | py | Python | adminmgr/media/code/python/red3/BD_543_565_624_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/config/BD_543_565_624_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/python/red3/BD_543_565_624_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | #!/usr/bin/python3
import sys
# f=open("reduce3.csv","w+")
di={}
for y in sys.stdin:
Record=list(map(str,y.split(",")))
if(len(Record)>3):
Record=[Record[0]+","+Record[1],Record[2],Record[3]]
s=int(Record[2][:-1])
if (Record[0],Record[1]) not in di:
di[(Record[0],Record[1])]=[s,1]
else:
di[(Record[0],Record[1])][0]+=s
di[(Record[0],Record[1])][1]+=1
dsr={}
for i in di:
sr=(di[i][0]*100)/di[i][1]
if i[0] not in dsr:
dsr[i[0]]=[]
else:
dsr[i[0]].append((i[1],sr,di[i][0]))
for i in sorted(dsr,key=lambda x:x):
j=sorted(dsr[i],key=lambda x:(-x[1],-x[2]))[0]
print(i,j[0],sep=",")
# f.write(i+","+j[0]+"\n")
| 24.269231 | 54 | 0.557845 |
6e9740ebd2a997095586f788ec3e7c7b37619818 | 9,622 | py | Python | hbgd_data_store_server/studies/management/commands/load_idx.py | pcstout/study-explorer | b49a6853d8155f1586360138ed7f87d165793184 | [
"Apache-2.0"
] | 2 | 2019-04-02T14:31:27.000Z | 2020-04-13T20:41:46.000Z | hbgd_data_store_server/studies/management/commands/load_idx.py | pcstout/study-explorer | b49a6853d8155f1586360138ed7f87d165793184 | [
"Apache-2.0"
] | 7 | 2019-08-07T14:44:54.000Z | 2020-06-05T17:30:51.000Z | hbgd_data_store_server/studies/management/commands/load_idx.py | pcstout/study-explorer | b49a6853d8155f1586360138ed7f87d165793184 | [
"Apache-2.0"
] | 1 | 2019-03-27T01:32:30.000Z | 2019-03-27T01:32:30.000Z | # Copyright 2017-present, Bill & Melinda Gates Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import zipfile
import fnmatch
from pandas import read_csv
from django.core.management.base import BaseCommand, CommandError
from ...models import Study, Count, Variable, Domain, EMPTY_IDENTIFIERS
# Regex file pattern defining the naming convention of IDX files
FILE_PATTERN = r'^IDX_(\w*)\.csv'
# Suffixes of domain name, code and category columns
# e.g. LB domain columns are LBTEST, LBTESTCD and LBCAT
DOMAIN_FORMAT = '{domain}TEST'
DOMAIN_CODE_FORMAT = '{domain}TESTCD'
DOMAIN_CAT_FORMAT = '{domain}CAT'
def get_study(row, study_cache=None, **kwargs):
"""
Finds the study for an entry.
"""
study_id_field = kwargs['study_id_field']
if not study_cache:
study_cache = {}
study_id = row[study_id_field]
if study_id in EMPTY_IDENTIFIERS:
return None
elif study_id in study_cache:
return study_cache[study_id]
study, _ = Study.objects.get_or_create(study_id=study_id)
study_cache[study_id] = study
return study
def get_domain_variable(row, domain, variable_cache=None):
"""
Get a Variable model specifying the rows domain, category and
code.
"""
if not variable_cache:
variable_cache = {}
decode_idx = DOMAIN_FORMAT.format(domain=domain.code)
code_idx = DOMAIN_CODE_FORMAT.format(domain=domain.code)
cat_idx = DOMAIN_CAT_FORMAT.format(domain=domain.code)
code = row[code_idx]
if code in EMPTY_IDENTIFIERS:
return None
attrs = dict(domain=domain, code=code)
cache_key = (domain.id, code)
if cache_key in variable_cache:
return variable_cache[cache_key]
try:
var = Variable.objects.get(**attrs)
except Variable.DoesNotExist:
category = row.get(cat_idx)
if category not in EMPTY_IDENTIFIERS:
attrs['category'] = category
var = Variable.objects.create(label=row[decode_idx], **attrs)
variable_cache[cache_key] = var
return var
def get_qualifiers(row, valid_qualifiers, qualifier_cache=None):
"""
Extract qualifier variables from row
"""
if not qualifier_cache:
qualifier_cache = {}
qualifiers = []
for qualifier, qual_code, suffix in valid_qualifiers:
code = row.get(qual_code + suffix)
if code in EMPTY_IDENTIFIERS:
raise ValueError('Qualifiers cannot be empty')
elif isinstance(code, float) and code.is_integer():
code = int(code)
attrs = dict(domain=qualifier, code=str(code))
cache_key = (qualifier.id, str(code))
if cache_key in qualifier_cache:
qualifiers.append(qualifier_cache[cache_key])
continue
try:
var = Variable.objects.get(**attrs)
except Variable.DoesNotExist:
var = Variable.objects.create(label=row[qual_code],
**attrs)
qualifier_cache[cache_key] = var
qualifiers.append(var)
return qualifiers
def get_valid_qualifiers(columns):
"""
Returns a list of the valid qualifier columns.
"""
valid_qualifiers = []
qualifiers = Domain.objects.filter(is_qualifier=True)
for qual in qualifiers:
wildcard_re = fnmatch.translate(qual.code)
cols = [col for col in columns if re.match(wildcard_re, col)]
if not cols:
continue
elif len(cols) > 1:
raise Exception('Qualifier code must match only one column per file.')
qual_code = cols[0]
suffix_re = qual_code + r'(\w{1,})'
potential_suffixes = [re.match(suffix_re, col).group(1) for col in columns
if re.match(suffix_re, col)]
suffix = ''
if len(potential_suffixes) > 0:
suffix = potential_suffixes[0]
valid_qualifiers.append((qual, qual_code, suffix))
return valid_qualifiers
def process_idx_df(df, domain, **kwargs):
"""
Process an IDX csv file, creating Code, Count and Study
objects.
"""
count_subj_field = kwargs['count_subj_field']
count_obs_field = kwargs['count_obs_field']
study_id_field = kwargs['study_id_field']
for required in [study_id_field, count_subj_field, count_obs_field]:
if required not in df.columns:
raise ValueError('IDX file does not contain %s column, '
'skipping.' % required)
valid_qualifiers = get_valid_qualifiers(df.columns)
study_cache, variable_cache, qualifier_cache = {}, {}, {}
df = df.fillna('NaN')
for _, row in df.iterrows():
count = row[count_obs_field]
subjects = row[count_subj_field]
if any(c in EMPTY_IDENTIFIERS for c in (count, subjects)):
continue
try:
qualifiers = get_qualifiers(row, valid_qualifiers, qualifier_cache)
except ValueError:
continue
study = get_study(row, study_cache, **kwargs)
if not study:
continue
variable = get_domain_variable(row, domain, variable_cache)
if variable:
qualifiers = [variable] + qualifiers
query = Count.objects.create(count=count, subjects=subjects, study=study)
query.codes = qualifiers
query.save()
| 35.902985 | 84 | 0.610788 |
6e97ddc9ef075e7d004c1410ff22b946e2b0175d | 1,937 | py | Python | setup.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | 7 | 2021-08-24T14:34:33.000Z | 2021-12-10T12:43:50.000Z | setup.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | null | null | null | setup.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | 1 | 2021-09-10T17:50:38.000Z | 2021-09-10T17:50:38.000Z | from setuptools import setup, find_packages
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="neureca",
version="0.0.1",
description="A framework for building conversational recommender systems",
long_description=long_description,
long_description_content_type="text/markdown",
author="Hojin Yang",
author_email="hojin.yang7@gmail.com",
url="https://github.com/hojinYang/neureca",
entry_points={
"console_scripts": [
"neureca-train = neureca.cmd:neureca_train_command",
],
},
install_requires=[
"click==7.1.2",
"Flask==1.1.2",
"joblib==1.0.1",
"numpy==1.20.2",
"pandas==1.2.3",
"pytorch-crf==0.7.2",
"pytorch-lightning==1.2.7",
"scikit-learn==0.24.1",
"scipy==1.6.2",
"sklearn==0.0",
"spacy==3.0.6",
"summarizers==1.0.4",
"tokenizers==0.10.2",
"toml==0.10.2",
"torch==1.8.1",
"TorchCRF==1.1.0",
"torchmetrics==0.3.1",
"tqdm==4.60.0",
"transformers==4.5.0",
"typer==0.3.2",
],
packages=find_packages(exclude=["demo-toronto"]),
python_requires=">=3",
package_data={"neureca": ["interface/static/*/*", "interface/templates/index.html"]},
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries",
],
)
| 32.283333 | 89 | 0.565823 |
6e98642f2b6b958a07ac0e545cf862d4394aa56c | 786 | py | Python | Thread.PY/thread-rlock.py | Phoebus-Ma/Python-Helper | d880729f0bbfbc2b1503602fd74c9177ecd4e970 | [
"MIT"
] | null | null | null | Thread.PY/thread-rlock.py | Phoebus-Ma/Python-Helper | d880729f0bbfbc2b1503602fd74c9177ecd4e970 | [
"MIT"
] | null | null | null | Thread.PY/thread-rlock.py | Phoebus-Ma/Python-Helper | d880729f0bbfbc2b1503602fd74c9177ecd4e970 | [
"MIT"
] | null | null | null | ###
# Thread rlock test.
#
# License - MIT.
###
import time
from threading import Thread, RLock
# thread_test2 - Thread test2 function.
# }
# thread_test1 - Thread test1 function.
# }
# Main function.
# }
# Program entry.
if '__main__' == __name__:
main()
| 14.290909 | 64 | 0.619593 |
6e98aa2320fefc8b613e9eb26ab879e97d03ea24 | 1,319 | py | Python | api/python/tests/test_bingo_nosql.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 204 | 2015-11-06T21:34:34.000Z | 2022-03-30T16:17:01.000Z | api/python/tests/test_bingo_nosql.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 509 | 2015-11-05T13:54:43.000Z | 2022-03-30T22:15:30.000Z | api/python/tests/test_bingo_nosql.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 89 | 2015-11-17T08:22:54.000Z | 2022-03-17T04:26:28.000Z | import shutil
import tempfile
from indigo.bingo import Bingo
from tests import TestIndigoBase
| 33.820513 | 87 | 0.64746 |
6e9910237b294e11a1a1bbded611300e71f69a4f | 3,932 | py | Python | src/core/src/tortuga/scripts/get_kit.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/core/src/tortuga/scripts/get_kit.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/core/src/tortuga/scripts/get_kit.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import json
import sys
from tortuga.exceptions.kitNotFound import KitNotFound
from tortuga.kit.kitCli import KitCli
from tortuga.wsapi.kitWsApi import KitWsApi
| 31.96748 | 74 | 0.559003 |
6e99d90082f82cff092fcb68582087a7ab692e17 | 2,264 | py | Python | examples.py | ThyagoFRTS/power-eletric | dd26cd5ffb2aca0741d8983e57351488badc64da | [
"MIT"
] | 1 | 2021-11-07T02:31:58.000Z | 2021-11-07T02:31:58.000Z | examples.py | ThyagoFRTS/power-eletric | dd26cd5ffb2aca0741d8983e57351488badc64da | [
"MIT"
] | null | null | null | examples.py | ThyagoFRTS/power-eletric | dd26cd5ffb2aca0741d8983e57351488badc64da | [
"MIT"
] | null | null | null | from power_sizing import calculate_power_luminance
from power_sizing import calculate_number_and_power_of_tugs
from conductor_sizing import conduction_capacity
from conductor_sizing import minimum_section
from conductor_sizing import voltage_drop
from conductor_sizing import harmonic_rate
from neutral_sizing import get_neutral_section
from protection_sizing import get_conductor_protection_section
import pathlib
#IMPORTANT: all inputs are in portuguese, remember this
# Calculate power luminance of an ambient
# inputs: Area (m^2)
calculate_power_luminance(12)
# Calculate power luminance of an ambient
# inputs: AmbientName (str), perimeter (m)
calculate_number_and_power_of_tugs('cozinha',13.3)
# Sizing conductor by capacity conduction
# inputs: power (Watts/VA), tension: optional (default 220), Potency-factor: optional (used if Watts, default 1)
# circuit_type: optional mono/tri (str) (default mono)
section1 = conduction_capacity(21000, fp=0.9 ,ft=0.87, fg=0.8, circuit_type='tri')
# Sizing conductor by section minimum
# inputs: Circuit type (str)
section2 = minimum_section('forca')
# Sizing conductor by voltage drop
# inputs: power (Watts/VA), distance in (m), fp: (default 1), circuit_type: optional 'mono'/'tri' (default 'mono')
# isolation_type = optional 0 to Non-Magnetic 1 to Magnetic (default 0), drop_rate: optional (default 0.04)
section3 = voltage_drop(13000,40, drop_rate=0.02, circuit_type='tri', fp = 0.75, isolation_type = 0)
# Sizing conductor by harmonic
# inputs: harmonics [I1, I3, I5...] circuit_type: optional 'tri'/'bi' (default 'tri')
section4, thd3 = harmonic_rate(harmonics = [100,60,45,30,20], fp = 1, ft=1, fg=1 , circuit_type = 'tri', installation_method = 'B1')
# Sizing neutral
# inputs: phase_section (mm), Ib: project current, balanced_circuit: optional bool (default True), circuit_type: optional 'mono'/'tri' (default 'mono')
neutral_section1 = get_neutral_section(95, 10, circuit_type = 'tri', index_THD3 = 0.14, balanced_circuit = True)
# Sizing protection
# inputs: phase_section (mm), Ib: Project current
neutral_section1 = get_neutral_section(95, 127, index_THD3 = 0.14, circuit_type = 'tri', balanced_circuit = True, installation_method = 'B1', ft=1, fg=1)
get_conductor_protection_section(95) | 48.170213 | 153 | 0.774293 |
6e9df45528e4294de8ca5838baa62293adbb002d | 784 | py | Python | myapp/migrations/0008_doctordata_specialist.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | 1 | 2022-02-08T16:37:43.000Z | 2022-02-08T16:37:43.000Z | myapp/migrations/0008_doctordata_specialist.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | null | null | null | myapp/migrations/0008_doctordata_specialist.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.6 on 2021-05-15 11:46
from django.db import migrations, models
| 27.034483 | 83 | 0.53699 |
6e9f10181a7ecfeffe5b3e63362769aa8677cc14 | 12,338 | py | Python | eventide/message.py | blakev/python-eventide | ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0 | [
"MIT"
] | 1 | 2021-01-14T18:35:44.000Z | 2021-01-14T18:35:44.000Z | eventide/message.py | blakev/python-eventide | ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0 | [
"MIT"
] | null | null | null | eventide/message.py | blakev/python-eventide | ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0 | [
"MIT"
] | 2 | 2021-04-20T22:09:02.000Z | 2021-07-29T21:52:30.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# >>
# python-eventide, 2020
# LiveViewTech
# <<
from uuid import UUID, uuid4
from datetime import datetime
from operator import attrgetter
from functools import total_ordering
from dataclasses import (
field,
asdict,
fields,
dataclass,
_process_class,
make_dataclass,
)
from typing import (
Dict,
List,
Type,
Mapping,
Callable,
Optional,
NamedTuple,
)
from pydantic import BaseModel, Field
from eventide.utils import jdumps, jloads, dense_dict
from eventide._types import JSON
f_blank = Field(default=None)
def messagecls(
cls_=None,
*,
msg_meta: Type[Metadata] = Metadata,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
) -> Type[Message]:
"""Decorator used to build a custom Message type, with the ability to bind
a custom Metadata class with additional fields. When these instances are built,
serialized, or de-serialized from the database all the correct fields will be
filled out with no interference on in-editor linters.
The parameters for this decorator copy @dataclass with the addition of ``msg_meta``
which allows the definition to have a custom Metadata class assigned to it.
All @messagecls decorated classes behave like normal dataclasses.
"""
# ensure this class definition follows basic guidelines
if not hasattr(msg_meta, '__dataclass_fields__'):
raise ValueError('custom message metadata class must be a @dataclass')
if not issubclass(msg_meta, Metadata):
raise ValueError('custom message metadata class must inherit eventide.Metadata')
# "wrap" the Metadata class with @dataclass so we don't have to on its definition
msg_meta = _process_class(msg_meta, True, False, True, False, False, False)
# mimic @dataclass functionality
if cls_ is None:
return wrap
return wrap(cls_)
message_cls = messagecls # alias
| 33.710383 | 92 | 0.623197 |
6e9f30208ea04fa7ad96c88e5f93a7fce170ab1e | 10,926 | py | Python | utils/minifier.py | MateuszDabrowski/elquent | 9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4 | [
"MIT"
] | 4 | 2021-05-26T19:48:31.000Z | 2022-03-01T03:52:39.000Z | utils/minifier.py | MateuszDabrowski/ELQuent | 9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4 | [
"MIT"
] | null | null | null | utils/minifier.py | MateuszDabrowski/ELQuent | 9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4 | [
"MIT"
] | 3 | 2021-03-05T23:06:38.000Z | 2021-10-05T19:56:28.000Z | #!/usr/bin/env python3.6
# -*- coding: utf8 -*-
'''
ELQuent.minifier
E-mail code minifier
Mateusz Dbrowski
github.com/MateuszDabrowski
linkedin.com/in/mateusz-dabrowski-marketing/
'''
import os
import re
import sys
import json
import pyperclip
from colorama import Fore, Style, init
# ELQuent imports
import utils.api.api as api
# Initialize colorama
init(autoreset=True)
# Globals
naming = None
source_country = None
# Predefined messege elements
ERROR = f'{Fore.WHITE}[{Fore.RED}ERROR{Fore.WHITE}] {Fore.YELLOW}'
WARNING = f'{Fore.WHITE}[{Fore.YELLOW}WARNING{Fore.WHITE}] '
SUCCESS = f'{Fore.WHITE}[{Fore.GREEN}SUCCESS{Fore.WHITE}] '
YES = f'{Style.BRIGHT}{Fore.GREEN}y{Fore.WHITE}{Style.NORMAL}'
NO = f'{Style.BRIGHT}{Fore.RED}n{Fore.WHITE}{Style.NORMAL}'
def country_naming_setter(country):
'''
Sets source_country for all functions
Loads json file with naming convention
'''
global source_country
source_country = country
# Loads json file with naming convention
with open(file('naming'), 'r', encoding='utf-8') as f:
global naming
naming = json.load(f)
'''
=================================================================================
File Path Getter
=================================================================================
'''
def file(file_path, file_name=''):
'''
Returns file path to template files
'''
def find_data_file(filename, directory='outcomes'):
'''
Returns correct file path for both script and frozen app
'''
if directory == 'main': # Files in main directory
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, filename)
elif directory == 'api': # For reading api files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, 'utils', directory, filename)
elif directory == 'outcomes': # For writing outcome files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, directory, filename)
file_paths = {
'naming': find_data_file('naming.json', directory='api'),
'mail_html': find_data_file(f'WK{source_country}_{file_name}.txt')
}
return file_paths.get(file_path)
'''
=================================================================================
Code Output Helper
=================================================================================
'''
def output_method(html_code):
'''
Allows user choose how the program should output the results
Returns email_id if creation/update in Eloqua was selected
'''
# Asks which output
print(
f'\n{Fore.GREEN}New code should be:',
f'\n{Fore.WHITE}[{Fore.YELLOW}0{Fore.WHITE}]\t',
f'{Fore.WHITE}[{Fore.YELLOW}FILE{Fore.WHITE}] Only saved to Outcomes folder',
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t',
f'{Fore.WHITE}[{Fore.YELLOW}HTML{Fore.WHITE}] Copied to clipboard as HTML for pasting [CTRL+V]',
f'\n{Fore.WHITE}[{Fore.YELLOW}2{Fore.WHITE}]\t',
f'{Fore.WHITE}[{Fore.YELLOW}CREATE{Fore.WHITE}] Uploaded to Eloqua as a new E-mail',
f'\n{Fore.WHITE}[{Fore.YELLOW}3{Fore.WHITE}]\t',
f'{Fore.WHITE}[{Fore.YELLOW}UPDATE{Fore.WHITE}] Uploaded to Eloqua as update to existing E-mail')
email_id = ''
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice == '0':
break
elif choice == '1' and html_code:
pyperclip.copy(html_code)
print(
f'\n{SUCCESS}You can now paste the HTML code [CTRL+V]')
break
elif choice == '2':
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}NAME{Fore.WHITE}] Write or copypaste name of the E-mail:')
name = api.eloqua_asset_name()
api.eloqua_create_email(name, html_code)
break
elif choice == '3':
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}ID{Fore.WHITE}] Write or copypaste ID of the E-mail to update:')
email_id = input(' ')
if not email_id:
email_id = pyperclip.paste()
api.eloqua_update_email(email_id, html_code)
break
else:
print(f'{ERROR}Entered value does not belong to any utility!')
choice = ''
return
'''
=================================================================================
E-mail Minifier
=================================================================================
'''
def email_minifier(code):
'''
Requires html code of an e-mail
Returns minified html code of an e-mail
'''
# HTML Minifier
html_attr = ['html', 'head', 'style', 'body',
'table', 'tbody', 'tr', 'td', 'th', 'div']
for attr in html_attr:
code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
code = re.sub(r'"\n+\s*', '" ', code)
for attr in ['alt', 'title', 'data-class']:
code = re.sub(rf'{attr}=""', '', code)
code = re.sub(r'" />', '"/>', code)
code = re.sub(r'<!--[^\[\]]*?-->', '', code)
for attr in html_attr:
code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
# Conditional Comment Minifier
code = re.sub(
r'\s*\n*\s*<!--\[if mso \| IE\]>\s*\n\s*', '\n<!--[if mso | IE]>', code)
code = re.sub(
r'\s*\n\s*<!\[endif\]-->\s*\n\s*', '<![endif]-->\n', code)
# CSS Minifier
code = re.sub(r'{\s*\n\s*', '{', code)
code = re.sub(r';\s*\n\s*}\n\s*', '} ', code)
code = re.sub(r';\s*\n\s*', '; ', code)
code = re.sub(r'}\n+', '} ', code)
# Whitespace Minifier
code = re.sub(r'\t', '', code)
code = re.sub(r'\n+', ' ', code)
while ' ' in code:
code = re.sub(r' {2,}', ' ', code)
# Trim lines to maximum of 500 characters
count = 0
newline_indexes = []
for i, letter in enumerate(code):
if count > 450:
if letter in ['>', ' ']:
newline_indexes.append(i)
count = 0
else:
count += 1
for index in reversed(newline_indexes):
output = code[:index+1] + '\n' + code[index+1:]
code = output
# Takes care of lengthy links that extends line over 500 characters
while True:
lengthy_lines_list = re.findall(r'^.{500,}$', code, re.MULTILINE)
if not lengthy_lines_list:
break
lengthy_link_regex = re.compile(r'href=\".{40,}?\"|src=\".{40,}?\"')
for line in lengthy_lines_list:
lengthy_link_list = re.findall(lengthy_link_regex, line)
code = code.replace(
lengthy_link_list[0], f'\n{lengthy_link_list[0]}')
return code
def email_workflow(email_code=''):
'''
Minifies the e-mail code
'''
if email_code:
module = True
# Gets e-mail code if not delivered via argument
elif not email_code:
module = False
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}Code{Fore.WHITE}] Copy code of the E-mail to minify and click [Enter]:')
input()
email_code = pyperclip.paste()
# Gets the code from the user
while True:
email_code = pyperclip.paste()
is_html = re.compile(r'<html[\s\S\n]*?</html>', re.UNICODE)
if is_html.findall(email_code):
print(f'{Fore.WHITE} {SUCCESS}Code copied from clipboard')
break
print(
f'{Fore.WHITE} {ERROR}Invalid HTML. Copy valid code and click [Enter]', end='')
input(' ')
# Saves original code to outcomes folder
with open(file('mail_html', file_name='original_code'), 'w', encoding='utf-8') as f:
f.write(email_code)
# Gets file size of original file
original_size = os.path.getsize(
file('mail_html', file_name='original_code'))
# Minified the code
minified_code = email_minifier(email_code)
# Saves minified code to outcomes folder
with open(file('mail_html', file_name='minified_code'), 'w', encoding='utf-8') as f:
f.write(minified_code)
# Gets file size of minified file
minified_size = os.path.getsize(
file('mail_html', file_name='minified_code'))
print(f'\n{Fore.WHITE} {SUCCESS}E-mail was minified from {Fore.YELLOW}{round(original_size/1024)}kB'
f'{Fore.WHITE} to {Fore.YELLOW}{round(minified_size/1024)}kB'
f' {Fore.WHITE}({Fore.GREEN}-{round((original_size-minified_size)/original_size*100)}%{Fore.WHITE})!')
if not module:
# Outputs the code
output_method(minified_code)
# Asks user if he would like to repeat
print(f'\n{Fore.YELLOW} {Fore.WHITE}Do you want to {Fore.YELLOW}minify another Email{Fore.WHITE}?',
f'{Fore.WHITE}({YES}/{NO}):', end=' ')
choice = input('')
if choice.lower() == 'y':
print(
f'\n{Fore.GREEN}-----------------------------------------------------------------------------')
email_workflow()
return
'''
=================================================================================
Minifier module menu
=================================================================================
'''
def minifier_module(country):
'''
Lets user minify the HTML code
'''
# Create global source_country and load json file with naming convention
country_naming_setter(country)
# Report type chooser
print(
f'\n{Fore.GREEN}ELQuent.minifier Utilites:'
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t [{Fore.YELLOW}E-mail{Fore.WHITE}] Minifies e-mail code'
f'\n{Fore.WHITE}[{Fore.YELLOW}Q{Fore.WHITE}]\t [{Fore.YELLOW}Quit to main menu{Fore.WHITE}]'
)
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice.lower() == 'q':
break
elif choice == '1':
email_workflow()
break
else:
print(f'{Fore.RED}Entered value does not belong to any utility!')
choice = ''
return
| 33.618462 | 116 | 0.532857 |
6ea22002e9ef59fb7dc0ae80af6cf9fc57e8fc02 | 2,305 | py | Python | doc/conf.py | safay/uta | bf3cf5a531aec4cca61f8926e79a624d01c76682 | [
"Apache-2.0"
] | 48 | 2016-09-20T16:28:46.000Z | 2022-02-02T10:32:02.000Z | doc/conf.py | safay/uta | bf3cf5a531aec4cca61f8926e79a624d01c76682 | [
"Apache-2.0"
] | 45 | 2016-12-12T23:41:12.000Z | 2022-02-09T11:48:04.000Z | doc/conf.py | safay/uta | bf3cf5a531aec4cca61f8926e79a624d01c76682 | [
"Apache-2.0"
] | 20 | 2016-10-09T10:16:44.000Z | 2021-06-18T02:19:58.000Z | ############################################################################
# Theme setup
html_theme = 'invitae'
html_theme_path = ['themes']
if html_theme == 'sphinx_rtd_theme':
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
elif html_theme == 'bootstrap':
import sphinx_bootstrap_theme
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
############################################################################
# Project config
import uta
version = uta.__version__
release = str(uta.__version__)
project = u'UTA'
authors = project + ' Contributors'
copyright = u'2015, ' + authors
extlinks = {
'issue': ('https://bitbucket.org/biocommons/uta/issue/%s', 'UTA issue '),
}
man_pages = [
('index', 'uta', u'UTA Documentation', [u'UTA Contributors'], 1)
]
############################################################################
# Boilerplate
# , 'inherited-members']
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
exclude_patterns = ['build', 'static', 'templates', 'themes']
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.fulltoc',
]
html_favicon = 'static/favicon.ico'
html_logo = 'static/logo.png'
html_static_path = ['static']
html_title = '{project} {release}'.format(project=project, release=release)
intersphinx_mapping = {
'http://docs.python.org/': None,
}
master_doc = 'index'
pygments_style = 'sphinx'
source_suffix = '.rst'
templates_path = ['templates']
# <LICENSE>
# Copyright 2014 UTA Contributors (https://bitbucket.org/biocommons/uta)
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
##
# http://www.apache.org/licenses/LICENSE-2.0
##
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| 29.935065 | 77 | 0.647722 |
6ea3527b6763af10afefd4e777c572e2ac4172fc | 997 | py | Python | exercises_gustguan/ex113.py | Ewerton12F/Python-Notebook | 85c4d38c35c6063fb475c25ebf4645688ec9dbcb | [
"MIT"
] | null | null | null | exercises_gustguan/ex113.py | Ewerton12F/Python-Notebook | 85c4d38c35c6063fb475c25ebf4645688ec9dbcb | [
"MIT"
] | null | null | null | exercises_gustguan/ex113.py | Ewerton12F/Python-Notebook | 85c4d38c35c6063fb475c25ebf4645688ec9dbcb | [
"MIT"
] | null | null | null |
li = leiaInt('Digite um nmero inteiro: ')
lr = leiaFloat('Digite um nmero real: ')
print(f'\033[1;3;34mO valor inteiro foi {li} e o real foi {lr}.\033[0;0;0m') | 33.233333 | 93 | 0.565697 |
6ea43d3eb6ab1823ba2e818e55cba7f4297fc931 | 10,851 | py | Python | frameworks/kafka/tests/auth.py | minyk/dcos-activemq | 57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1 | [
"Apache-2.0"
] | null | null | null | frameworks/kafka/tests/auth.py | minyk/dcos-activemq | 57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1 | [
"Apache-2.0"
] | null | null | null | frameworks/kafka/tests/auth.py | minyk/dcos-activemq | 57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import retrying
import sdk_cmd
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
def wait_for_brokers(client: str, brokers: list):
    """
    Run bootstrap on the specified client to resolve the list of brokers
    """
    LOG.info("Running bootstrap to wait for DNS resolution")
    cmd_parts = [
        '/opt/bootstrap',
        '-print-env=false',
        '-template=false',
        '-install-certs=false',
        '-resolve-hosts', ','.join(brokers),
    ]
    exec_result = sdk_cmd.task_exec(client, ' '.join(cmd_parts))
    LOG.info(exec_result)
    # Bootstrap reports success on stdout once every broker host resolves.
    combined_output = ' '.join(str(part) for part in exec_result)
    assert "SDK Bootstrap successful" in combined_output
def write_client_properties(id: str, task: str, lines: list) -> str:
    """Write a client properties file containing the specified lines"""
    properties_file = "{id}-client.properties".format(id=id)
    LOG.info("Generating %s", properties_file)
    write_result = sdk_cmd.create_task_text_file(task, properties_file, lines)
    LOG.info(write_result)
    return properties_file
# Lowercase alias of LOG; presumably kept for callers importing `log` -- TODO confirm.
log = LOG
| 35.345277 | 117 | 0.599853 |
6ea45f9b51639f8a0b82e891df2cc0bae0501648 | 1,242 | py | Python | python/problem-060.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | 1 | 2015-12-18T13:25:41.000Z | 2015-12-18T13:25:41.000Z | python/problem-060.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | null | null | null | python/problem-060.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import prime
# Problem statement kept as a runtime string (left byte-identical).
description = '''
Prime pair sets
Problem 60
The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime. For example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
'''

# Load a precomputed prime table before searching.
prime.loadPrimes('primes.bin')
# NOTE(review): `findPairSets` is not defined in this excerpt -- presumably a
# generator defined elsewhere in the file yielding candidate prime sets; confirm.
result = next(findPairSets(5))
print(result, sum(result))
| 29.571429 | 313 | 0.681159 |
6ea54be459981a2401f315126f120b27aa749589 | 5,298 | py | Python | multilanguage_frappe_website/hooks.py | developmentforpeople/frappe-multilingual-website | c0bf74453f3d1de6127ad174aab6c05360cc1ec1 | [
"MIT"
] | null | null | null | multilanguage_frappe_website/hooks.py | developmentforpeople/frappe-multilingual-website | c0bf74453f3d1de6127ad174aab6c05360cc1ec1 | [
"MIT"
] | null | null | null | multilanguage_frappe_website/hooks.py | developmentforpeople/frappe-multilingual-website | c0bf74453f3d1de6127ad174aab6c05360cc1ec1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# Basic app metadata read by the Frappe framework.
app_name = "multilanguage_frappe_website"
app_title = "Multilanguage Frappe Website"
app_publisher = "DFP developmentforpeople"
app_description = "Multilanguage Frappe Framework website example"
app_icon = "octicon octicon-file-directory"
app_color = "green"
app_email = "developmentforpeople@gmail.com"
app_license = "MIT"

# App name (used to override only sites with this app installed)
multilanguage_app_site_name = app_name

# Hosts/sites where this app will be enabled
multilanguage_app_site_hosts = ["mf.local", "frappe-multilingual-website.developmentforpeople.com"]

# Languages available for the website
translated_languages_for_website = ["en", "es"]

# The first entry of the list above is the default language
language_default = translated_languages_for_website[0]

# Home page
home_page = "index"

# 301 url redirects
website_redirects = [
    # Remove duplicated routes for the home page:
    { "source": "/index", "target": "/" },
    { "source": "/index.html", "target": "/" },
    # Strip the default-language segment: if "en" is the first entry of
    # translated_languages_for_website, "/en/example" redirects to "/example".
    { "source": r"/{0}".format(language_default), "target": "/" },
    { "source": r"/{0}/(.*)".format(language_default), "target": r"/\1" },
    # Force the url language for some Frappe framework dynamic pages:
    { "source": "/en/login", "target": "/login?_lang=en" },
    { "source": "/es/login", "target": "/login?_lang=es" },
    { "source": "/en/contact", "target": "/contact?_lang=en" },
    { "source": "/es/contact", "target": "/contact?_lang=es" },
    # Force the url language for pages that are not language specific:
    { "source": "/en/translations", "target": "/translations?_lang=en" },
    { "source": "/es/translations", "target": "/translations?_lang=es" },
]

# Global Jinja context variables related to languages, available to all pages.
website_context = {
    "languages": translated_languages_for_website,
    "language_default": language_default,
    "app_site_name": app_name,
}

# Hook that calculates the active language from the url's first segment.
update_website_context = [
    "{0}.context_extend".format(app_name),
]

# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# app_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"
# include js, css files in header of web template
web_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# web_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "multilanguage_frappe_website.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "multilanguage_frappe_website.install.before_install"
# after_install = "multilanguage_frappe_website.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "multilanguage_frappe_website.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "multilanguage_frappe_website.tasks.all"
# ],
# "daily": [
# "multilanguage_frappe_website.tasks.daily"
# ],
# "hourly": [
# "multilanguage_frappe_website.tasks.hourly"
# ],
# "weekly": [
# "multilanguage_frappe_website.tasks.weekly"
# ]
# "monthly": [
# "multilanguage_frappe_website.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "multilanguage_frappe_website.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "multilanguage_frappe_website.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "multilanguage_frappe_website.task.get_dashboard_data"
# }
| 29.597765 | 99 | 0.714232 |
6ea56221c4382d050ea20b187d845407bd8d039d | 90 | py | Python | renormalizer/mps/tdh/__init__.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | renormalizer/mps/tdh/__init__.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | renormalizer/mps/tdh/__init__.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from renormalizer.mps.tdh.propagation import unitary_propagation
| 22.5 | 64 | 0.755556 |
6ea5d0975fd4eec1bb06ec6bc86c9a210abd074c | 398 | py | Python | items/Boots_Of_Speed.py | ivoryhuang/LOL_simple_text_version | 13c98721ad094c4eb6b835c838805c77dc9075c5 | [
"MIT"
] | 2 | 2017-01-08T15:53:49.000Z | 2017-01-19T17:24:53.000Z | items/Boots_Of_Speed.py | ivoryhuang/LOL_simple_text_version | 13c98721ad094c4eb6b835c838805c77dc9075c5 | [
"MIT"
] | null | null | null | items/Boots_Of_Speed.py | ivoryhuang/LOL_simple_text_version | 13c98721ad094c4eb6b835c838805c77dc9075c5 | [
"MIT"
] | null | null | null | from items.Item import Item | 28.428571 | 75 | 0.701005 |
6ea618363d6a6f275346b95643dd61b27b8e3d12 | 12,045 | py | Python | RsNet/train_models.py | gehuangyi20/random_spiking | c98b550420ae4061b9d47ca475e86c981caf5514 | [
"MIT"
] | 1 | 2020-08-03T17:47:40.000Z | 2020-08-03T17:47:40.000Z | RsNet/train_models.py | gehuangyi20/random_spiking | c98b550420ae4061b9d47ca475e86c981caf5514 | [
"MIT"
] | null | null | null | RsNet/train_models.py | gehuangyi20/random_spiking | c98b550420ae4061b9d47ca475e86c981caf5514 | [
"MIT"
] | null | null | null | ## train_models.py -- train the neural network models for attacking
##
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
## Modified for the needs of MagNet.
import os
import argparse
import utils
import numpy as np
import tensorflow as tf
from keras import backend as k
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from RsNet.setup_mnist import MNIST, MNISTModel
from RsNet.tf_config import gpu_config, setup_visibile_gpus, CHANNELS_LAST, CHANNELS_FIRST
from RsNet.dataset_nn import model_mnist_meta
from RsNet.random_spiking.nn_ops import random_spike_sample_scaling, random_spike_sample_scaling_per_sample
def train(data, file_name, params, rand_params, num_epochs=50, batch_size=128, is_batch=True,
          dropout=0.0, data_format=None, init_model=None, train_temp=1, data_gen=None):
    """
    Standard neural network training procedure.

    Builds a 4-conv / 2-dense classifier whose ReLU activations are each
    followed by a `random_spike` Lambda layer, trains it with SGD, and
    optionally saves the weights plus the dataset index.

    :param data: dataset wrapper exposing train/test/validation data and labels
    :param file_name: path to save the trained model, or None to skip saving
    :param params: six layer widths [conv1, conv2, conv3, conv4, fc1, fc2]
    :param rand_params: 16 values consumed pairwise as (sample_rate, scaling)
                        by the eight random-spike layers
    :param num_epochs: number of training epochs
    :param batch_size: mini-batch size (only used on the non-generator path;
                       the generator path hard-codes 128)
    :param is_batch: forwarded to every random-spike layer as "is_batch"
    :param dropout: dropout rate after fc1; 0 disables the Dropout layer
    :param data_format: 'channels_first' or 'channels_last'
    :param init_model: optional weights file to initialize the model from
    :param train_temp: distillation temperature
                       # NOTE(review): not referenced directly in this body;
                       # presumably consumed by the loss `fn` -- confirm
    :param data_gen: optional Keras ImageDataGenerator for augmentation
    :return: the trained Keras Model
    """
    # NOTE(review): `random_spike` and the loss `fn` are not defined in this
    # excerpt; presumably wrappers around random_spike_sample_scaling* and a
    # temperature-scaled cross-entropy -- confirm against the full file.
    _input = Input(shape=data.train_data.shape[1:])
    x = _input
    x = Conv2D(params[0], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[0], "scaling": rand_params[1], "is_batch": is_batch})(x)
    x = Conv2D(params[1], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[2], "scaling": rand_params[3], "is_batch": is_batch})(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[4], "scaling": rand_params[5], "is_batch": is_batch})(x)
    x = Conv2D(params[2], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[6], "scaling": rand_params[7], "is_batch": is_batch})(x)
    x = Conv2D(params[3], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[8], "scaling": rand_params[9], "is_batch": is_batch})(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[10], "scaling": rand_params[11], "is_batch": is_batch})(x)
    x = Flatten()(x)
    x = Dense(params[4])(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[12], "scaling": rand_params[13], "is_batch": is_batch})(x)
    if dropout > 0:
        # training=True forces dropout to stay active outside fit() as well.
        x = Dropout(dropout)(x, training=True)
    x = Dense(params[5])(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[14], "scaling": rand_params[15], "is_batch": is_batch})(x)
    # Final 10-way output; no softmax here, so these are raw scores.
    x = Dense(10)(x)
    model = Model(_input, x)
    model.summary()

    if init_model is not None:
        model.load_weights(init_model)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])

    if data_gen is None:
        # NOTE: `nb_epoch` is the legacy Keras-1 spelling of `epochs`.
        model.fit(data.train_data, data.train_labels,
                  batch_size=batch_size,
                  validation_data=(data.test_data, data.test_labels),
                  nb_epoch=num_epochs,
                  shuffle=True)
    else:
        # Augmented path: validate on the held-out validation split instead
        # of the test split used above.
        data_flow = data_gen.flow(data.train_data, data.train_labels, batch_size=128, shuffle=True)
        model.fit_generator(data_flow,
                            steps_per_epoch=len(data_flow),
                            validation_data=(data.validation_data, data.validation_labels),
                            nb_epoch=num_epochs,
                            shuffle=True)

    if file_name is not None:
        model.save(file_name)
        # save the dataset index alongside the model
        utils.save_model_idx(file_name, data)

    return model
# Command-line interface. String flags taking 'yes'/'no' act as booleans.
parser = argparse.ArgumentParser(description='Train mnist model')
parser.add_argument('--data_dir', help='data dir, required', type=str, default=None)
parser.add_argument('--data_name', help='data name, required', type=str, default=None)
parser.add_argument('--model_dir', help='save model directory, required', type=str, default=None)
parser.add_argument('--model_name', help='save model name, required', type=str, default=None)
parser.add_argument('--validation_size', help='size of validation dataset', type=int, default=5000)
parser.add_argument('--random_spike', help='parameter used for random spiking', type=str, default=None)
parser.add_argument('--random_spike_batch', help='whether to use batch-wised random noise', type=str, default='yes')
parser.add_argument('--dropout', help='dropout rate', type=float, default=0.5)
parser.add_argument('--rotation', help='rotation angle', type=float, default=10)
parser.add_argument('--gpu_idx', help='gpu index', type=int, default=0)
parser.add_argument('--data_format', help='channels_last or channels_first', type=str, default=CHANNELS_FIRST)
parser.add_argument('--is_dis', help='whether to use distillation training', type=str, default='no')
parser.add_argument('--is_trans', help='whether do transfer training using soft label', type=str, default='no')
parser.add_argument('--is_data_gen', help='whether train on data generator, zoom, rotation', type=str, default='no')
parser.add_argument('--trans_model', help='transfer model name', type=str, default='no')
parser.add_argument('--trans_drop', help='dropout trans model name', type=float, default=0.5)
parser.add_argument('--trans_random_spike', help='random spiking parameter used for trans model',
                    type=str, default=None)
parser.add_argument('--train_sel_rand', help='whether to random select the training data', type=str, default='no')
parser.add_argument('--train_size', help='number of training example', type=int, default=0)
parser.add_argument('--pre_idx', help='predefined idx, duplicated training dataset', type=str, default=None)
parser.add_argument('--ex_data_dir', help='extra data dir, required', type=str, default=None)
parser.add_argument('--ex_data_name', help='extra data name, required', type=str, default=None)
parser.add_argument('--ex_data_size', help='number of extra training example', type=int, default=0)
parser.add_argument('--ex_data_sel_rand', help='whether to random select the extra training data',
                    type=str, default='no')
args = parser.parse_args()
# Unpack CLI arguments; 'yes'/'no' string flags become plain booleans here.
data_dir = args.data_dir
data_name = args.data_name
save_model_dir = args.model_dir
save_model_name = args.model_name
validation_size = args.validation_size
train_size = args.train_size
train_sel_rand = args.train_sel_rand == 'yes'
# NOTE(review): `parse_rand_spike` is not defined in this excerpt --
# presumably parses the CLI string into the 16-value rand_params list; confirm.
para_random_spike = None if args.random_spike is None else parse_rand_spike(args.random_spike)
_is_batch = args.random_spike_batch == 'yes'
dropout = args.dropout
gpu_idx = args.gpu_idx
rotation = args.rotation
data_format = args.data_format
is_distillation = args.is_dis == 'yes'
is_data_gen = args.is_data_gen == 'yes'
ex_data_dir = args.ex_data_dir
ex_data_name = args.ex_data_name
ex_data_size = args.ex_data_size
ex_data_sel_rand = args.ex_data_sel_rand == 'yes'
pre_idx_path = args.pre_idx

# Pin this process to the requested GPU and install the shared TF session.
setup_visibile_gpus(str(gpu_idx))
k.tensorflow_backend.set_session(tf.Session(config=gpu_config))

if not os.path.exists(save_model_dir):
    os.makedirs(save_model_dir)

# Load the MNIST dataset, converting to the requested channel layout.
data = MNIST(data_dir, data_name, validation_size, model_meta=model_mnist_meta,
             input_data_format=CHANNELS_LAST, output_data_format=data_format,
             train_size=train_size, train_sel_rand=train_sel_rand)

# Optionally reuse a previously saved training-example index.
if pre_idx_path is not None:
    pre_idx = utils.load_model_idx(pre_idx_path)
    data.apply_pre_idx(pre_idx)
# Optionally append extra training examples from a second dataset.
if ex_data_dir is not None and ex_data_name is not None and ex_data_size > 0:
    data.append_train_data(ex_data_dir, ex_data_name, ex_data_size,
                           input_data_format=CHANNELS_LAST, output_data_format=data_format, sel_rand=ex_data_sel_rand)

# Transfer training: replace the hard labels with the soft predictions of a
# previously trained model.
is_trans = args.is_trans == 'yes'
if is_trans:
    print("Get the soft label of the transfer model")
    trans_random_spike = None if args.trans_random_spike is None else parse_rand_spike(args.trans_random_spike)
    trans_model = MNISTModel(args.trans_model, None, output_logits=False,
                             input_data_format=data_format, data_format=data_format, dropout=0,
                             rand_params=trans_random_spike, is_batch=True)
    predicted = trans_model.model.predict(data.train_data, batch_size=500, verbose=1)
    # Accuracy of the transfer model against the original hard labels.
    train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
    data.train_labels = predicted
    # NOTE(review): "trasfer" typo in the runtime message -- left as-is (doc-only pass).
    print("trasfer model acc on training data:", train_data_acc)

# Optional data augmentation: small rotations, shears, zooms and shifts.
if is_data_gen:
    data_gen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=rotation,
        shear_range=0.2,
        zoom_range=0.2,
        fill_mode='reflect',
        width_shift_range=4,
        height_shift_range=4,
        horizontal_flip=False,
        vertical_flip=False,
        data_format=data_format
    )
else:
    data_gen = None
if is_distillation:
    # Defensive distillation: short init run, then a teacher trained at high
    # temperature, then a student trained on the teacher's softened labels.
    print("train init model")
    train(data, save_model_dir + "/" + save_model_name + '_init',
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=1, is_batch=_is_batch,
          data_format=data_format, dropout=dropout, data_gen=data_gen)
    print("train teacher model")
    train(data, save_model_dir + "/" + save_model_name + '_teacher',
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
          data_format=data_format, dropout=dropout,
          init_model=save_model_dir + "/" + save_model_name + '_init', train_temp=100, data_gen=data_gen)
    # evaluate labels with the teacher model
    model_teacher = MNISTModel(os.path.join(save_model_dir, save_model_name + '_teacher'), None, output_logits=True,
                               input_data_format=data_format, data_format=data_format, dropout=0,
                               rand_params=para_random_spike, is_batch=True)
    predicted = model_teacher.model.predict(data.train_data, batch_size=500, verbose=1)
    train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
    print("train teacher acc:", train_data_acc)
    # Soften the teacher logits at temperature 100 to form the student labels.
    with tf.Session() as sess:
        y = sess.run(tf.nn.softmax(predicted/100))
        print(y)
    data.train_labels = y
    print("train student model")
    train(data, save_model_dir + "/" + save_model_name,
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
          data_format=data_format, dropout=dropout,
          init_model=save_model_dir + "/" + save_model_name + '_init', train_temp=100, data_gen=data_gen)
else:
    # Plain (non-distilled) training run.
    train(data, save_model_dir + "/" + save_model_name,
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
          data_format=data_format, dropout=dropout, data_gen=data_gen)
| 47.235294 | 118 | 0.703778 |
6ea71b4513f1f9f11b82f5034de5e9e21242e450 | 3,151 | py | Python | link_crawler.py | Stevearzh/greedy-spider | ca8b1d892e4ac5066ab33aafe7755ee959ef630a | [
"MIT"
] | null | null | null | link_crawler.py | Stevearzh/greedy-spider | ca8b1d892e4ac5066ab33aafe7755ee959ef630a | [
"MIT"
] | null | null | null | link_crawler.py | Stevearzh/greedy-spider | ca8b1d892e4ac5066ab33aafe7755ee959ef630a | [
"MIT"
] | null | null | null | import datetime
import re
import time
import urllib
from urllib import robotparser
from urllib.request import urlparse
from downloader import Downloader
# Crawler defaults; -1 disables the corresponding limit check in link_crawler.
DEFAULT_DELAY = 5               # politeness delay passed to Downloader (presumably seconds -- confirm)
DEFAULT_DEPTH = -1              # max link depth; -1 = unlimited
DEFAULT_URL = -1                # max pages processed; -1 = unlimited
DEFAULT_AGENT = 'wswp'          # default User-Agent string
DEFAULT_RETRY = 1               # download retries passed to Downloader
DEFAULT_TIMEOUT = 60            # request timeout passed to Downloader (presumably seconds -- confirm)
DEFAULT_IGNORE_ROBOTS = False   # if True, skip the robots.txt permission check
def link_crawler(seed_url, link_regex=None, delay=DEFAULT_DELAY, max_depth=DEFAULT_DEPTH,
                 max_urls=DEFAULT_URL, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRY,
                 timeout=DEFAULT_TIMEOUT, ignore_robots=DEFAULT_IGNORE_ROBOTS, scrape_callback=None, cache=None):
    '''
    Crawl from the given seed URL following links matched by link_regex

    Only links on the seed URL's domain are queued. The queue is a LIFO
    stack (list.pop from the end), so traversal is depth-first.

    :param seed_url: URL to start crawling from
    :param link_regex: pattern new links must match to be followed
                       (None means no links are followed automatically)
    :param max_depth: links found deeper than this are not followed (-1 = unlimited)
    :param max_urls: stop queuing after this many pages (-1 = unlimited)
    :param scrape_callback: optional callable(url, html) that may return extra
                            links to enqueue
    :param cache: forwarded to Downloader
    '''
    # the queue of URL's that still need to be crawled
    crawl_queue = [seed_url]
    # the URL's that have been seen and at what depth
    seen = {seed_url: 0}
    # NOTE(review): despite the original comment, this counts pages whose
    # links were processed (once per page, below the link loop), not raw
    # downloads -- confirm intent before relying on max_urls semantics.
    # track how many URL's have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)
    D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies,
                   num_retries=num_retries, timeout=timeout, cache=cache)

    while crawl_queue:
        url = crawl_queue.pop()
        depth = seen[url]
        # check url passes robots.txt restrictions
        if ignore_robots or rp.can_fetch(user_agent, url):
            html = D(url)
            links = []
            if scrape_callback:
                links.extend(scrape_callback(url, html) or [])

            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))

                for link in links:
                    link = normalize(seed_url, link)
                    # check whether already crawled this link
                    if link not in seen:
                        seen[link] = depth + 1
                        # check link is within same domain
                        if same_domain(seed_url, link):
                            # success add this new link to queue
                            crawl_queue.append(link)

                # check whether have reached downloaded maximum
                num_urls += 1
                if num_urls == max_urls:
                    break
        else:
            print('Blocked by robots.txt', url)
def normalize(seed_url, link):
    '''
    Normalize this URL by removing hash and adding domain
    '''
    # Strip any fragment so the same page is not queued twice.
    defragged, _fragment = urllib.parse.urldefrag(link)
    # Resolve relative links against the seed URL.
    return urllib.parse.urljoin(seed_url, defragged)
def same_domain(url1, url2):
    '''
    Return True if both URL's belong to same domain
    '''
    first_netloc = urllib.parse.urlparse(url1).netloc
    second_netloc = urllib.parse.urlparse(url2).netloc
    return first_netloc == second_netloc
def get_robots(url):
    '''
    Initialize robots parser for this domain
    '''
    # Fetch and parse /robots.txt from the url's domain (network I/O).
    robots_url = urllib.parse.urljoin(url, '/robots.txt')
    parser = robotparser.RobotFileParser()
    parser.set_url(robots_url)
    parser.read()
    return parser
def get_links(html):
    '''
    Return a list of links from html

    Extracts the href value of every anchor tag. The backreference forces
    the closing quote to match the opening one; the original pattern
    (["\'] ... ["\']) accepted mismatched quote pairs.
    '''
    # a regular expression to extract all links from the webpage
    webpage_regex = re.compile(r'<a[^>]+href=(["\'])(.*?)\1', re.IGNORECASE)
    # list of all links from the webpage
    return [match.group(2) for match in webpage_regex.finditer(html)]
if __name__ == '__main__':
    # execute only if run as a script
    # No CLI wiring yet: import this module and call link_crawler() directly.
    pass
| 28.645455 | 100 | 0.668042 |
6ea734988dbfada1408954f978d47bd46b1b2de0 | 1,994 | py | Python | Array.diff.py | ErosMLima/last-classes-js-py-node-php | 14775adaa3372c03c1e73d0699516f759e162dc5 | [
"MIT"
] | 2 | 2020-08-01T03:31:28.000Z | 2021-02-02T15:17:31.000Z | Array.diff.py | ErosMLima/last-classes-js-py-node-php | 14775adaa3372c03c1e73d0699516f759e162dc5 | [
"MIT"
] | null | null | null | Array.diff.py | ErosMLima/last-classes-js-py-node-php | 14775adaa3372c03c1e73d0699516f759e162dc5 | [
"MIT"
] | null | null | null | #Array.diff.py OKS
// Return the elements of `a` that do not appear anywhere in `b`.
// Fix: the original read `b,index(x)` (comma operator + undefined `index`);
// the intended call is `b.indexOf(x)`.
function array_diff(a, b) {
  return a.filter(function(x) { return b.indexOf(x) == -1; });
}
// solution 2 for array_diff
// Keep only the values of `a` that are absent from `b`.
function array_diff(a, b) {
  const absentFromB = (value) => !b.includes(value);
  return a.filter(absentFromB);
}
// Filter `a` down to the entries that `b` does not contain.
function array_diff(a, b) {
  return a.filter(function (entry) {
    return !b.includes(entry);
  });
}
// Bouncing Balls
// Count how many times a ball dropped from height `h` passes a window at
// height `window`, given a bounce factor strictly between 0 and 1.
// Returns -1 for non-physical input (also when the ball never reaches the window).
// Fix: the original parameter was misspelled `boumce`, leaving the `bounce`
// used in the body undefined.
function boucingBall(h, bounce, window) {
  var rebounds = -1;
  if (bounce > 0 && bounce < 1) {
    while (h > window) {
      // one pass falling + one pass bouncing back up
      rebounds += 2;
      h *= bounce;
    }
  }
  return rebounds;
}
// Backspaces in string
// Simulate typing with backspaces: each '#' removes the previously kept
// character (popping an empty stack is a harmless no-op).
function cleanString(str) {
  const kept = [];
  for (const ch of str) {
    if (ch === '#') {
      kept.pop();
    } else {
      kept.push(ch);
    }
  }
  return kept.join('');
}
// Apply backspaces: repeatedly delete the first '#' together with the
// character it erases (a leading '#' with nothing before it is dropped).
// Fix: the original regex literal `\(^|[^#])#/g` was malformed (missing the
// opening `/`), which is a syntax error.
function clean_string(string) {
  while (string.indexOf(`#`) >= 0)
    string = string.replace(/[^#]?#/, '');
  return string;
}
// Expression Matter
// Return the maximum value obtainable from a, b, c using '+', '*' and
// optional parentheses. Generalized: the a*b+c grouping was missing from the
// original candidate list (it only matters when an operand can be below 1,
// but including it makes the function correct for all inputs).
function expressionMatter(a, b, c) {
  const x1 = a * (b + c);
  const x2 = a * b * c;
  const x3 = a + b * c;
  const x4 = a + b + c;
  const x5 = (a + b) * c;
  const x6 = a * b + c; // previously missing combination
  return Math.max(x1, x2, x3, x4, x5, x6);
}
// Maximum value of a, b, c combined with '+', '*' and parentheses,
// evaluated over every distinct grouping.
function expressionMatter(a, b, c) {
  const candidates = [
    a + b + c,
    a * b * c,
    a * (b + c),
    (a + b) * c,
    a + b * c,
    a * b + c,
  ];
  return Math.max(...candidates);
}
// moreZeros pipeline: keep characters whose binary ASCII form has more zeros than ones
// Keep the first occurrence of each character of `s` whose ASCII code,
// written in binary, contains more zeros than ones.
// Fixes: `.fliter` -> `.filter`, `converToBinary` -> `convertToBinary`,
// `ateMoreZeros` -> `areMoreZeros` (the originals referenced undefined names).
function moreZeros(s){
  return s.split('')
          .filter(removeDoubles)
          .map(convertToAscii)
          .map(convertToBinary)
          .filter(areMoreZeros)
          .map(convertToDecimal)
          .map(convertToChar);
}
// Array.prototype.filter callback: true only for the first occurrence of an item.
function removeDoubles(item, idx, arr) {
  const firstIndex = arr.indexOf(item);
  return firstIndex === idx;
}
// Character -> code unit of its first (only) character.
function convertToAscii(c) {
  const code = c.charCodeAt(0);
  return code;
}
// Number -> its base-2 string representation.
function convertToBinary(num) {
  const binary = num.toString(2);
  return binary;
}
// True when the string has strictly more '0' digits than '1' digits
// (counted by deleting the opposite digit, as in the original).
function areMoreZeros(str) {
  const zeroCount = str.replace(/1/g, '').length;
  const oneCount = str.replace(/0/g, '').length;
  return zeroCount > oneCount;
}
// Base-2 string -> number.
function convertToDecimal(bi) {
  return Number.parseInt(bi, 2);
}
// Code unit -> single-character string.
function convertToChar(num) {
  const ch = String.fromCharCode(num);
  return ch;
}