hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
665956b19edea097158d528d01890795f973fee1 | 19,404 | py | Python | app/apps/address/migrations/0001_initial.py | brsrtc/mini-erp-docker | f5c37c71384c76e029a26e89f4771a59ed02f925 | [
"MIT"
] | 1 | 2021-01-18T07:11:31.000Z | 2021-01-18T07:11:31.000Z | app/apps/address/migrations/0001_initial.py | brsrtc/mini-erp-docker | f5c37c71384c76e029a26e89f4771a59ed02f925 | [
"MIT"
] | null | null | null | app/apps/address/migrations/0001_initial.py | brsrtc/mini-erp-docker | f5c37c71384c76e029a26e89f4771a59ed02f925 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-12-05 17:27
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import core.cache
| 59.521472 | 93 | 0.44563 |
665bab55df7c6bcde1b85c9c43014205b79501eb | 2,984 | py | Python | pybf/image_settings.py | Sergio5714/pybf | bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc | [
"Apache-2.0"
] | 1 | 2021-11-02T09:54:41.000Z | 2021-11-02T09:54:41.000Z | pybf/image_settings.py | Sergio5714/pybf | bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc | [
"Apache-2.0"
] | null | null | null | pybf/image_settings.py | Sergio5714/pybf | bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc | [
"Apache-2.0"
] | 2 | 2020-04-17T10:50:06.000Z | 2021-11-02T09:54:47.000Z | """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Sergei Vostrikov, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np | 31.410526 | 101 | 0.655831 |
665ca8b455ad5fa005ae44eb4ff2f68155d6d9ba | 9,912 | py | Python | pre-receive.d/net.twistedbytes.gitlab-protector.py | andgeno/GitLab-Protector | b05f39a23213bd832cbbf30bc63731aca1fce18d | [
"MIT"
] | 7 | 2020-12-14T10:05:13.000Z | 2021-11-25T15:14:26.000Z | pre-receive.d/net.twistedbytes.gitlab-protector.py | andgeno/GitLab-Protector | b05f39a23213bd832cbbf30bc63731aca1fce18d | [
"MIT"
] | 1 | 2021-04-19T13:47:12.000Z | 2021-04-24T12:39:47.000Z | pre-receive.d/net.twistedbytes.gitlab-protector.py | andgeno/GitLab-Protector | b05f39a23213bd832cbbf30bc63731aca1fce18d | [
"MIT"
] | 1 | 2021-04-19T14:06:54.000Z | 2021-04-19T14:06:54.000Z | #!/usr/bin/env python
import sys
import os
import re
import subprocess
from enum import Enum
# Instantiate the hook handler at import time.
# NOTE(review): GitLabProtector is not defined in the visible portion of
# this file — presumably defined above; confirm before relying on this.
GitLabProtector()
| 38.123077 | 161 | 0.602502 |
665d3713837abc4149228da527c02f71d0d908ef | 1,151 | py | Python | tests/test_cli.py | joshbduncan/word-search-generator | 3c527f0371cbe4550a24403c660d1c6511b4cf79 | [
"MIT"
] | 4 | 2021-09-18T21:21:54.000Z | 2022-03-02T03:53:54.000Z | tests/test_cli.py | joshbduncan/word-search-generator | 3c527f0371cbe4550a24403c660d1c6511b4cf79 | [
"MIT"
] | 4 | 2021-09-18T21:50:33.000Z | 2022-03-22T04:29:33.000Z | tests/test_cli.py | joshbduncan/word-search-generator | 3c527f0371cbe4550a24403c660d1c6511b4cf79 | [
"MIT"
] | 1 | 2021-11-17T14:53:50.000Z | 2021-11-17T14:53:50.000Z | import os
import pathlib
import tempfile
# Shared scratch directory for test output files; the directory is removed
# when this TemporaryDirectory object is cleaned up / garbage-collected.
TEMP_DIR = tempfile.TemporaryDirectory()
| 26.159091 | 84 | 0.709818 |
665d77836b64427e5626b7f66bfbf1c6d819e02b | 1,167 | py | Python | karas/__init__.py | TuXiaokang/karas | 2549502424b2d4c67047b867b0315f33b2e997c5 | [
"MIT"
] | 3 | 2019-02-28T13:53:48.000Z | 2022-01-18T12:53:37.000Z | karas/__init__.py | TuXiaokang/karas | 2549502424b2d4c67047b867b0315f33b2e997c5 | [
"MIT"
] | null | null | null | karas/__init__.py | TuXiaokang/karas | 2549502424b2d4c67047b867b0315f33b2e997c5 | [
"MIT"
] | 1 | 2022-01-18T12:53:42.000Z | 2022-01-18T12:53:42.000Z | import pickle
from karas.version import __version__
| 22.442308 | 60 | 0.548415 |
665e40e33fdd973b30b29de0d4999dd092a29402 | 681 | py | Python | calc.py | fja05680/calc | 6959bdd740722c7e3024f4e5a9a21607ad5ffccf | [
"MIT"
] | null | null | null | calc.py | fja05680/calc | 6959bdd740722c7e3024f4e5a9a21607ad5ffccf | [
"MIT"
] | null | null | null | calc.py | fja05680/calc | 6959bdd740722c7e3024f4e5a9a21607ad5ffccf | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import calc
if __name__ == '__main__':
    # NOTE(review): `main` is not defined in this view; it is presumably
    # provided by the `calc` module imported above — confirm.
    main()
| 23.482759 | 46 | 0.444934 |
666446282fcb45a4a20b926c54fc47be65a01ac8 | 8,534 | py | Python | aiida_environ/workflows/pw/grandcanonical.py | environ-developers/aiida-environ | c39ac70227a41e084b74df630c3cb4b4caa27094 | [
"MIT"
] | null | null | null | aiida_environ/workflows/pw/grandcanonical.py | environ-developers/aiida-environ | c39ac70227a41e084b74df630c3cb4b4caa27094 | [
"MIT"
] | 1 | 2021-12-07T17:03:44.000Z | 2021-12-07T17:03:44.000Z | aiida_environ/workflows/pw/grandcanonical.py | environ-developers/aiida-environ | c39ac70227a41e084b74df630c3cb4b4caa27094 | [
"MIT"
] | null | null | null | import numpy as np
from aiida.common import AttributeDict
from aiida.engine import WorkChain, append_
from aiida.orm import Dict, List, StructureData
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
from aiida.orm.utils import load_node
from aiida.plugins import WorkflowFactory
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
from aiida_environ.calculations.adsorbate.gen_supercell import (
adsorbate_gen_supercell,
gen_hydrogen,
)
from aiida_environ.calculations.adsorbate.post_supercell import adsorbate_post_supercell
from aiida_environ.data.charge import EnvironChargeData
from aiida_environ.utils.charge import get_charge_range
from aiida_environ.utils.vector import get_struct_bounds
# Workflow classes resolved from the AiiDA plugin registry by entry-point name.
EnvPwBaseWorkChain = WorkflowFactory("environ.pw.base")
PwBaseWorkChain = WorkflowFactory("quantumespresso.pw.base")
| 44.915789 | 88 | 0.664284 |
6664aaeb4a16b83003b59cd285e9bdc4f631fdb5 | 6,481 | py | Python | tabnet/utils.py | huangyz0918/tabnet | a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4 | [
"MIT"
] | 1 | 2021-06-17T04:47:41.000Z | 2021-06-17T04:47:41.000Z | tabnet/utils.py | huangyz0918/tabnet | a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4 | [
"MIT"
] | null | null | null | tabnet/utils.py | huangyz0918/tabnet | a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
def generate_categorical_to_ordinal_map(inputs):
    """Map each unique categorical value in *inputs* to an ordinal code.

    Ordinals are assigned 0..n-1 in sorted order of the unique values
    (np.unique returns them sorted).  Accepts a pandas Series or any
    array-like accepted by np.unique.  Returns a dict {value: ordinal}.
    """
    if isinstance(inputs, pd.Series):
        inputs = inputs.values
    # Enumerate the sorted unique values directly; this avoids the redundant
    # list()/range(len()) round-trips of the previous implementation while
    # producing exactly the same dictionary.
    return {value: ordinal for ordinal, value in enumerate(np.unique(inputs))}
def map_categoricals_to_ordinals(categoricals, mapping):
    """Encode categorical values as their ordinal codes.

    Returns a torch.LongTensor with the same shape as *categoricals*.
    Raises ValueError when a value has no entry in *mapping*.
    """
    observed = set(np.unique(categoricals).flatten())
    missing = observed - set(mapping.keys())
    if missing:
        raise ValueError(
            "Mapping missing the following keys: {}".format(missing)
        )
    # Look every element up in the mapping, then convert to a LongTensor.
    codes = np.vectorize(mapping.get)(categoricals).astype(float)
    return torch.from_numpy(codes).long()
def map_categoricals_to_one_hot(categoricals, mapping):
    """One-hot encode categorical values using *mapping*'s ordinal codes.

    Returns a squeezed torch.LongTensor whose rows are one-hot vectors of
    length len(mapping).  Raises ValueError when a value in *categoricals*
    has no entry in *mapping*.
    """
    missing = set(np.unique(categoricals).flatten()) - set(mapping.keys())
    if missing:
        raise ValueError(
            "Mapping missing the following keys: {}".format(missing)
        )
    num_classes = len(mapping.keys())
    codes = np.vectorize(mapping.get)(categoricals).reshape(-1)
    # Row i of the identity matrix is the one-hot vector for ordinal i.
    one_hot = np.squeeze(np.eye(num_classes)[codes])
    return torch.from_numpy(one_hot.astype(float)).long()
def map_ordinals_to_categoricals(ordinals, mapping):
    """Translate ordinal codes back into their categorical values.

    Accepts a torch.Tensor, a list, or a numpy array of ordinals and
    returns a (squeezed) numpy array of the corresponding categories.
    """
    # Normalize the input to a numpy array first.
    if isinstance(ordinals, torch.Tensor):
        ordinals = ordinals.detach().cpu().numpy()
    elif isinstance(ordinals, list):
        ordinals = np.array(ordinals)
    # Invert the category -> ordinal mapping into ordinal -> category.
    inverse = dict(zip(mapping.values(), mapping.keys()))
    return np.vectorize(inverse.get)(ordinals).squeeze()
| 33.755208 | 91 | 0.577843 |
6664d9c361d76731e630fab7db18a3314ba27f7a | 699 | py | Python | ex022.py | nascimentobrenda24/PythonExercises | 2055f42a0454ae25cba6a6457c85822eaad2df01 | [
"MIT"
] | 1 | 2021-11-23T21:41:25.000Z | 2021-11-23T21:41:25.000Z | ex022.py | nascimentobrenda24/PythonExercises | 2055f42a0454ae25cba6a6457c85822eaad2df01 | [
"MIT"
] | null | null | null | ex022.py | nascimentobrenda24/PythonExercises | 2055f42a0454ae25cba6a6457c85822eaad2df01 | [
"MIT"
] | null | null | null | # Analisador de textos
# Exercise: read a person's full name and show:
# - The name in all-uppercase and in all-lowercase letters.
# - How many letters it has in total (not counting spaces).
print('=*'*20, 'CADASTRO', '=*'*20)
nome = str(input('Nome Completo:')).strip()# strip leading/trailing whitespace from the input
print('Analisando seu nome...')
print('Seu nome em minsculo {}'.format(nome.lower()))
print('Seu nome em MAISCULO {}'.format(nome.upper()))
print('Seu nome tem ano todo {} letras'.format(len(nome)-nome.count(' ')))# total length minus the space count
primeiro_nome = nome.split() # split into words; index 0 is the first name
print('Seu primeiro nome tem {} letras'.format(len(primeiro_nome[0])))
| 34.95 | 102 | 0.703863 |
66654d5cfc565e697020cd64524f69662efe7ca5 | 312 | py | Python | urls.py | stephenmcd/gamblor | a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616 | [
"BSD-2-Clause"
] | 12 | 2015-06-09T02:31:43.000Z | 2021-12-11T21:35:38.000Z | urls.py | binarygrrl/gamblor | a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616 | [
"BSD-2-Clause"
] | null | null | null | urls.py | binarygrrl/gamblor | a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616 | [
"BSD-2-Clause"
] | 9 | 2016-11-14T23:56:51.000Z | 2021-04-14T07:47:44.000Z |
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from core import game
# Run autodiscovery at import time: register ModelAdmins and game modules.
admin.autodiscover()
game.autodiscover()
# URL routes (old-style Django `patterns`): the admin site first, then the
# social-auth URLs, and finally the core app's catch-all patterns.
urlpatterns = patterns("",
    ("^admin/", include(admin.site.urls)),
    url("", include("social_auth.urls")),
    url("", include("core.urls")),
)
| 18.352941 | 60 | 0.692308 |
666552755de681921ce121bf7878b38237804c08 | 3,258 | py | Python | DCGAN/train.py | drone911/Mnist-GANs | 6b5ffc6ecf5070522ebcb6a41374cfffd674b684 | [
"MIT"
] | null | null | null | DCGAN/train.py | drone911/Mnist-GANs | 6b5ffc6ecf5070522ebcb6a41374cfffd674b684 | [
"MIT"
] | null | null | null | DCGAN/train.py | drone911/Mnist-GANs | 6b5ffc6ecf5070522ebcb6a41374cfffd674b684 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 20:11:22 2019
@author: drone911
"""
from helper import *
from models import *
import numpy as np
from keras.datasets import mnist
from tqdm import tqdm
import warnings
if __name__=="__main__":
    # Entry point: train a DCGAN on the combined MNIST train+test images.
    warnings.filterwarnings("ignore")
    (train_images, train_labels), (test_images, test_labels)=mnist.load_data()
    # Hyper-parameters: random_dim is the generator's latent-vector size;
    # lr/beta_1 are the optimizer settings passed to the model builders.
    random_dim=100
    batch_size=128
    lr=0.0002
    beta_1=0.5
    # Use all 70k images; add a channel axis and rescale pixels from
    # [0, 255] to [-1, 1].
    train_images=np.concatenate((train_images, test_images), axis=0)
    train_images=train_images.reshape(-1,28,28,1)
    train_images=(train_images.astype(np.float32) - 127.5) / 127.5
    # Build the networks and run the training loop.  NOTE(review): these
    # helpers come from the star-imports of helper/models above — confirm.
    generator=get_gen_nn(random_dim=random_dim, lr=lr, beta_1=beta_1,verbose=False)
    discriminator=get_disc_nn(lr=lr, beta_1=beta_1,verbose=False)
    gan=create_gan(discriminator, generator, random_dim=random_dim, lr=lr, beta_1=beta_1,verbose=False)
    train(train_images, generator, discriminator, gan, random_dim=random_dim, epochs=50, batch_size=128)
| 41.240506 | 116 | 0.612339 |
6666b27d9a32939d312fcb0f1e04eb3582ec3f56 | 275 | py | Python | 03 - Types/3.2 - InbuiltTypes-ListsTuples/07-method-errors-index.py | python-demo-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | 2 | 2019-08-23T06:05:55.000Z | 2019-08-26T03:56:07.000Z | 03 - Types/3.2 - InbuiltTypes-ListsTuples/07-method-errors-index.py | python-lang-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | null | null | null | 03 - Types/3.2 - InbuiltTypes-ListsTuples/07-method-errors-index.py | python-lang-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | 4 | 2020-10-01T07:16:07.000Z | 2021-07-17T07:55:08.000Z | # HEAD
# DataType - List method -index() Usage Error
# DESCRIPTION
# Demonstrates the list.index() method and the error raised
# when the requested item is not in the list.
# RESOURCES
#
lists = ['hello', 'hi', 'howdy', 'heyas']
# 'hello hello' is not an element, so index() raises ValueError
print(lists.index('hello hello'))
| 21.153846 | 46 | 0.676364 |
6667684709a7e3192cfea4fd79e3ee7e997e694d | 2,418 | py | Python | Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py | rangaswamymr/blue-marlin | 2ab39a6af01e14f40386f640fe087aeb284b5524 | [
"Apache-2.0"
] | null | null | null | Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py | rangaswamymr/blue-marlin | 2ab39a6af01e14f40386f640fe087aeb284b5524 | [
"Apache-2.0"
] | null | null | null | Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py | rangaswamymr/blue-marlin | 2ab39a6af01e14f40386f640fe087aeb284b5524 | [
"Apache-2.0"
] | null | null | null | from pyspark import SparkContext, SparkConf, SQLContext
from pyspark.sql.functions import count, lit, col, udf, expr, collect_list, explode
from pyspark.sql.types import IntegerType, StringType, MapType, ArrayType, BooleanType, FloatType
from pyspark.sql import HiveContext
from datetime import datetime, timedelta
from pyspark.sql.functions import broadcast
# Pull per-uckey impression counts for one week of factdata partitions.
query = "select count_array,day,uckey from factdata where day in ('2020-05-15','2020-05-14','2020-05-13','2020-05-12','2020-05-11','2020-05-10','2020-05-09')"
sc = SparkContext()
hive_context = HiveContext(sc)
df = hive_context.sql(query)
# NOTE(review): add_count_map is not defined in the visible imports; it is
# presumably a project helper producing the `count_map` column used below.
df = add_count_map(df)
# Explode the map into one row per entry, then sum impressions per (uckey, day).
df = df.select('uckey', 'day', explode(df.count_map)).withColumnRenamed("value", "impr_count")
df = df.withColumn('impr_count', udf(lambda x: int(x), IntegerType())(df.impr_count))
df = df.groupBy('uckey', 'day').sum('impr_count').withColumnRenamed("sum(impr_count)", 'impr_count')
# The uckey is a comma-separated composite key; field index 1 is the slot id.
split_uckey_udf = udf(lambda x: x.split(","), ArrayType(StringType()))
df = df.withColumn('col', split_uckey_udf(df.uckey))
df = df.select('uckey', 'impr_count', 'day', df.col[1]).withColumnRenamed("col[1]", 'slot_id')
# Daily impression totals per slot, broadcast for an efficient map-side join.
df_slot = df.select('slot_id', 'impr_count', 'day')
df_slot = df_slot.groupBy('slot_id', 'day').sum('impr_count').withColumnRenamed("sum(impr_count)", "impr_total")
bc_df_slot = broadcast(df_slot)
df_new = df.join(bc_df_slot, on=["slot_id", 'day'], how="inner")
# Each uckey's share (percent) of its slot's daily impressions.
df_new = df_new.withColumn('percent', udf(lambda x, y: (x*100)/y, FloatType())(df_new.impr_count, df_new.impr_total))
df2 = df_new.groupBy("uckey").agg(collect_list('percent').alias('percent'))
# NOTE(review): `variance` is not defined in the visible imports — presumably
# a project helper computing the variance of the collected percent list.
df2 = df2.withColumn('var', udf(lambda x: variance(x), FloatType())(df2.percent))
df2.select("uckey", "var").orderBy(["var"], ascending=False).show(300, truncate=False)
df2.cache()
# NOTE(review): the message says "> 0.01" but the filter keeps var <= 0.01 —
# one of the two looks inverted; confirm the intended direction.
print("% uckeys having varience > 0.01 ", df2.filter((df2.var <= 0.01)).count()*100/df2.count())
| 37.78125 | 158 | 0.706369 |
66698e346f68c9e447122b0d937db33190f58a61 | 4,443 | py | Python | tests/test_metrohash.py | thihara/pyfasthash | 20a53f9bb7bf15f98e3e549f523b49e1e0f62e15 | [
"Apache-2.0"
] | 234 | 2015-02-05T13:41:58.000Z | 2022-03-30T08:55:23.000Z | tests/test_metrohash.py | thihara/pyfasthash | 20a53f9bb7bf15f98e3e549f523b49e1e0f62e15 | [
"Apache-2.0"
] | 50 | 2015-03-19T05:53:34.000Z | 2022-03-30T16:20:17.000Z | tests/test_metrohash.py | thihara/pyfasthash | 20a53f9bb7bf15f98e3e549f523b49e1e0f62e15 | [
"Apache-2.0"
] | 44 | 2015-04-23T18:51:43.000Z | 2022-03-30T21:07:57.000Z | import pytest
import pyhash
| 39.669643 | 83 | 0.765699 |
66698ee5453f94b084a237ee9ea9e607d1b0395c | 9,922 | py | Python | main_fed.py | berserkersss/FL_CNN_Diff_Acc | f78651b426ff700108b62f2afbd99134b30af1e6 | [
"MIT"
] | null | null | null | main_fed.py | berserkersss/FL_CNN_Diff_Acc | f78651b426ff700108b62f2afbd99134b30af1e6 | [
"MIT"
] | null | null | null | main_fed.py | berserkersss/FL_CNN_Diff_Acc | f78651b426ff700108b62f2afbd99134b30af1e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import math
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import CLUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
if __name__ == '__main__':
    # Compare federated (FedAvg over non-IID client shards) training against
    # centralized (IID) training and plot weight-divergence diagnostics.
    # parse args
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
    # load dataset and split users
    if args.dataset == 'mnist':
        trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
        dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
        # sample users
        #if args.iid:
        dict_users_iid_temp = mnist_iid(dataset_train, args.num_users)
        #else:
        dict_users = mnist_noniid(dataset_train, args.num_users)
        #dict_users_iid_temp = dict_users
    elif args.dataset == 'cifar':
        trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
        dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            exit('Error: only consider IID setting in CIFAR10')
    else:
        exit('Error: unrecognized dataset')
    img_size = dataset_train[0][0].shape
    #print('img_size=',img_size)
    # build model
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        # Two MLP copies: _fl trained federated, _cl trained centralized.
        len_in = 1
        for x in img_size:
            len_in *= x
        net_glob_fl = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
        net_glob_cl = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    net_glob_fl.train()
    net_glob_cl.train()
    # copy weights
    w_glob_fl = net_glob_fl.state_dict()
    w_glob_cl = net_glob_cl.state_dict()
    # training
    # eta is the learning-rate constant used in the divergence bound below.
    eta = 0.01
    Nepoch = 5 # num of epoch
    loss_train_fl, loss_train_cl = [], []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []
    para_g = []
    loss_grad = []
    delta_batch_loss_list = []
    beta_list = []
    count_list = np.zeros(256).tolist()
    line1_iter_list = []
    line2_iter_list = []
    wgfed_list = []
    wgcl_list = []
    w_locals, loss_locals = [], []
    w0_locals,loss0_locals =[], []
    weight_div_list = []
    para_cl = []
    para_fl = []
    beta_locals, mu_locals, sigma_locals = [],[],[]
    x_stat_loacals, pxm_locals =[],[]
    data_locals = [[] for i in range(args.epochs)]
    w_fl_iter,w_cl_iter = [], []
    beta_max_his, mu_max_his, sigma_max_his = [], [], []
    acc_train_cl_his, acc_train_fl_his = [], []
    # Baseline accuracy of the untrained model, recorded for both curves.
    net_glob_fl.eval()
    acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
    acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
    acc_train_cl_his.append(acc_test_cl)
    acc_train_fl_his.append(acc_test_cl)
    print("Training accuracy: {:.2f}".format(acc_train_cl))
    print("Testing accuracy: {:.2f}".format(acc_test_cl))
    # Flatten the per-user IID index sets into one list for centralized training.
    dict_users_iid = []
    for iter in range(args.num_users):
        dict_users_iid.extend(dict_users_iid_temp[iter])
    # Centralized learning
    for iter in range(args.epochs):
        w_locals, loss_locals = [], []
        glob_cl = CLUpdate(args=args, dataset=dataset_train, idxs=dict_users_iid)
        w_cl, loss_cl = glob_cl.cltrain(net=copy.deepcopy(net_glob_cl).to(args.device))
        w_cl_iter.append(copy.deepcopy(w_cl))
        net_glob_cl.load_state_dict(w_cl)
        loss_train_cl.append(loss_cl) # loss of CL
        print('cl,iter = ', iter, 'loss=', loss_cl)
        # testing
        acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
        acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
        print("Training accuracy: {:.2f}".format(acc_train_cl))
        print("Testing accuracy: {:.2f}".format(acc_test_cl))
        acc_train_cl_his.append(acc_test_cl.item())
    # FL setting
    for iter in range(args.epochs): # num of iterations
        w_locals, loss_locals, d_locals = [], [], []
        beta_locals, mu_locals, sigma_locals = [], [], []
        x_stat_loacals, pxm_locals =[],[]
        # M clients local update
        m = max(int(args.frac * args.num_users), 1) # num of selected users
        idxs_users = np.random.choice(range(args.num_users), m, replace=False) # select randomly m clients
        for idx in idxs_users:
            local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx]) # data select
            w, loss, delta_bloss, beta, x_stat, d_local = local.train(net=copy.deepcopy(net_glob_fl).to(args.device))
            x_value, count = np.unique(x_stat,return_counts=True) # compute the P(Xm)
            w_locals.append(copy.deepcopy(w))# collect local model
            loss_locals.append(copy.deepcopy(loss))#collect local loss fucntion
            d_locals.extend(d_local)# collect the isx of local training data in FL
            beta_locals.append(np.max(beta))# beta value
            mu_locals.append(np.max(delta_bloss)) # mu value
            sigma_locals.append(np.std(delta_bloss))#sigma value
            x_stat_loacals.append(x_stat) # Xm
            pxm_locals.append(np.array(count/(np.sum(count)))) #P(Xm)
        data_locals[iter] = d_locals#collect dta
        w_glob_fl = FedAvg(w_locals)# update the global model
        net_glob_fl.load_state_dict(w_glob_fl)# copy weight to net_glob
        w_fl_iter.append(copy.deepcopy(w_glob_fl))
        loss_fl = sum(loss_locals) / len(loss_locals)
        loss_train_fl.append(loss_fl) # loss of FL
        # compute P(Xg)
        xg_value, xg_count = np.unique(x_stat_loacals,return_counts=True)
        xg_count = np.array(xg_count)/(np.sum(xg_count))
        print('fl,iter = ',iter,'loss=',loss_fl)
        # compute beta, mu, sigma
        beta_max = (np.max(beta_locals))
        mu_max = (np.max(mu_locals))
        sigma_max = (np.max(sigma_locals))
        beta_max_his.append(np.max(beta_locals))
        mu_max_his.append(np.max(mu_locals))
        sigma_max_his.append(np.max(sigma_locals))
        # print('beta=', beta_max)
        # print('mu=', mu_max)
        # print('sigma=', sigma_max)
        # testing
        net_glob_fl.eval()
        acc_train_fl, loss_train_flxx = test_img(net_glob_fl, dataset_train, args)
        acc_test_fl, loss_test_flxx = test_img(net_glob_fl, dataset_test, args)
        print("Training accuracy: {:.2f}".format(acc_train_fl))
        print("Testing accuracy: {:.2f}".format(acc_test_fl))
        line1_list=[]
        # the weight divergence of numerical line
        for j in range(len(pxm_locals)):
            lditem1 = sigma_max*(np.sqrt(2/(np.pi*50*(iter+1)))+np.sqrt(2/(np.pi*50*m*(iter+1))))
            lditem2 = mu_max*(np.abs(pxm_locals[j]-xg_count))
            lditem3= 50*(iter+1)*(((1+eta*beta_max)**((iter+1)*Nepoch))-1)/(50*m*(iter+1)*beta_max) # 50 is batch size (10)* num of epoch (5)
            line1 = lditem3*(lditem1+lditem2)
            line1_list.append(line1) # m clients
        line1_iter_list.append(np.sum(line1_list)) # iter elements
        acc_train_fl_his.append(acc_test_fl.item())
    #weight divergence of simulation
    # Compare the first-layer weights of the CL and FL models per iteration.
    for i in range(len(w_cl_iter)):
        para_cl = w_cl_iter[i]['layer_input.weight']
        para_fl = w_fl_iter[i]['layer_input.weight']
        line2 = torch.norm(para_cl-para_fl)
        print(torch.norm(para_cl-para_fl)/torch.norm(para_cl))
        line2_iter_list.append(line2.item())
    print('y_line1=',line1_iter_list)# numerical
    print('y_line2=',line2_iter_list) # simulation
    # Diagnostic plots saved under Figure/.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(line2_iter_list, c="red")
    plt.xlabel('Iterations')
    plt.ylabel('Difference')
    plt.savefig('Figure/different.png')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(beta_max_his, c="red")
    plt.xlabel('Iterations')
    plt.ylabel('Beta_max')
    plt.savefig('Figure/beta_max.png')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(sigma_max_his, c="red")
    plt.xlabel('Iterations')
    plt.ylabel('Sigma_max')
    plt.savefig('Figure/sigma_max.png')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(mu_max_his, c="red")
    plt.xlabel('Iterations')
    plt.ylabel('Mu_max')
    plt.savefig('Figure/mu_max.png')
    colors = ["blue", "red"]
    labels = ["non-iid", "iid"]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(acc_train_fl_his, c=colors[0], label=labels[0])
    ax.plot(acc_train_cl_his, c=colors[1], label=labels[1])
    ax.legend()
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.savefig('Figure/Accuracy_non_iid2_temp.png')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(line1_iter_list, c=colors[0])
    plt.xlabel('Local_Iterations')
    plt.ylabel('Grad')
    plt.savefig('Figure/numerical _temp.png')
| 36.884758 | 141 | 0.651078 |
666a08a2699afb54d288c230c2b9f22bf4716df5 | 1,375 | py | Python | scaner/controllers/communities.py | dearbornlavern/scaner | 401de0ec7caef5c5a23aedec106db136bd4e4658 | [
"Apache-2.0"
] | 12 | 2016-09-30T12:43:44.000Z | 2022-02-17T17:17:02.000Z | scaner/controllers/communities.py | dearbornlavern/scaner | 401de0ec7caef5c5a23aedec106db136bd4e4658 | [
"Apache-2.0"
] | null | null | null | scaner/controllers/communities.py | dearbornlavern/scaner | 401de0ec7caef5c5a23aedec106db136bd4e4658 | [
"Apache-2.0"
] | 7 | 2016-09-28T09:48:48.000Z | 2020-05-15T04:56:11.000Z | from flask import current_app
from scaner.utils import add_metadata
import json
# PRUEBA EXTRACION USUARIOS
# @add_metadata()
# def get(userId, fields=None, *args, **kwargs):
# #get_task = current_app.tasks.get_users_from_twitter.delay()
# get_task = current_app.tasks.execute_metrics.delay()
# return {'result': "In progress"}, 200 | 39.285714 | 87 | 0.749818 |
666ce6df66f28481199af4b25376a59418b9191f | 395 | py | Python | cct/cases/create_snapshot.py | LmangoLemon/mind | 1b269acca41f840c5c71cb6c92ec92ecfb977ad4 | [
"Apache-2.0"
] | null | null | null | cct/cases/create_snapshot.py | LmangoLemon/mind | 1b269acca41f840c5c71cb6c92ec92ecfb977ad4 | [
"Apache-2.0"
] | null | null | null | cct/cases/create_snapshot.py | LmangoLemon/mind | 1b269acca41f840c5c71cb6c92ec92ecfb977ad4 | [
"Apache-2.0"
] | null | null | null | import logging
from time import sleep
from cct.case import Case
logger = logging.getLogger(__file__)
| 17.173913 | 62 | 0.668354 |
666d3c5b51416d64a4d8d00ca1cc2533f85b4bf8 | 296 | py | Python | venv/Lib/site-packages/IPython/terminal/ptshell.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | venv/Lib/site-packages/IPython/terminal/ptshell.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | venv/Lib/site-packages/IPython/terminal/ptshell.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | raise DeprecationWarning("""DEPRECATED:
After Popular request and decision from the BDFL:
`IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell`
during the beta cycle (after IPython 5.0.beta3) Sorry about that.
This file will be removed in 5.0 rc or final.
""")
| 32.888889 | 85 | 0.777027 |
6670c507913d776c7f3759690ef2c0ab2aa02880 | 591 | py | Python | ex078.py | raquelEllem/exerciciosPython | 489c2360de84c69dbe9da7710660fb064cd605fa | [
"MIT"
] | null | null | null | ex078.py | raquelEllem/exerciciosPython | 489c2360de84c69dbe9da7710660fb064cd605fa | [
"MIT"
] | null | null | null | ex078.py | raquelEllem/exerciciosPython | 489c2360de84c69dbe9da7710660fb064cd605fa | [
"MIT"
] | null | null | null | lista = []
for n in range(0, 5):
lista.append(int(input(f'Digite um valor para a posio {n}: ')))
print('=-=' * 10)
print(f'Voc digitou os valores {lista}')
maior = lista[0]
menor = lista[0]
for n in lista:
if maior < n:
maior = n
if menor > n:
menor = n
print(f'O maior valor digitado foi {maior} nas posies ', end='')
for i, v in enumerate(lista):
if v == maior:
print(f'{i}...', end='')
print()
print(f'O menor valor digitado foi {menor} nas posies ', end='')
for i, v in enumerate(lista):
if v == menor:
print(f'{i}...', end='') | 26.863636 | 69 | 0.575296 |
6674228e20201842275a8416c646d65895ba336f | 6,461 | py | Python | chb/x86/opcodes/X86RotateLeftCF.py | kestreltechnology/CodeHawk-Binary | aa0b2534e0318e5fb3770ec7b4d78feb0feb2394 | [
"MIT"
] | null | null | null | chb/x86/opcodes/X86RotateLeftCF.py | kestreltechnology/CodeHawk-Binary | aa0b2534e0318e5fb3770ec7b4d78feb0feb2394 | [
"MIT"
] | null | null | null | chb/x86/opcodes/X86RotateLeftCF.py | kestreltechnology/CodeHawk-Binary | aa0b2534e0318e5fb3770ec7b4d78feb0feb2394 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.invariants.XVariable import XVariable
from chb.invariants.XXpr import XXpr
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
from chb.x86.X86DictionaryRecord import x86registry
from chb.x86.X86Opcode import X86Opcode
from chb.x86.X86Operand import X86Operand
if TYPE_CHECKING:
from chb.x86.X86Dictionary import X86Dictionary
from chb.x86.simulation.X86SimulationState import X86SimulationState
| 40.130435 | 80 | 0.629005 |
6674ff922f4c82dfa03dc7390843f76b68565580 | 283 | py | Python | error_handlers/access_token.py | Egor2005l/cho | c7cb165394089b277be5c306edde0b8fb42e466d | [
"MIT"
] | null | null | null | error_handlers/access_token.py | Egor2005l/cho | c7cb165394089b277be5c306edde0b8fb42e466d | [
"MIT"
] | null | null | null | error_handlers/access_token.py | Egor2005l/cho | c7cb165394089b277be5c306edde0b8fb42e466d | [
"MIT"
] | null | null | null | from asyncio import sleep
from vkbottle.exceptions import VKError
from vkbottle.framework.blueprint.user import Blueprint
# vkbottle Blueprint used to register the access-token error handlers;
# presumably attached to the main bot elsewhere in the package — confirm.
user = Blueprint(
    name='access_token_error_blueprint'
)
| 20.214286 | 56 | 0.756184 |
667689203557923536a76893ffda9eef2e58e85a | 2,135 | py | Python | test_challenges.py | UPstartDeveloper/Graph-Applications | 45a3fa83f9e3fff243be35dd169edfcfd020f1a1 | [
"MIT"
] | null | null | null | test_challenges.py | UPstartDeveloper/Graph-Applications | 45a3fa83f9e3fff243be35dd169edfcfd020f1a1 | [
"MIT"
] | null | null | null | test_challenges.py | UPstartDeveloper/Graph-Applications | 45a3fa83f9e3fff243be35dd169edfcfd020f1a1 | [
"MIT"
] | null | null | null | import challenges
import unittest
# Run the test-suite with unittest when this file is executed directly.
if __name__ == '__main__':
unittest.main() | 26.6875 | 79 | 0.516628 |
66769c379769d62d8db4f6ca3c7ed84d674f3460 | 1,293 | py | Python | 2020-08-month-long-challenge/day06.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | 2020-08-month-long-challenge/day06.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | 2020-08-month-long-challenge/day06.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Day 6: Find All Duplicates in an Array
#
# Given an array of integers, 1 a[i] n (n = size of array), some elements
# appear twice and others appear once.
# Find all the elements that appear twice in this array.
# Could you do it without extra space and in O(n) runtime?
# Smoke test: in the sample input only 2 and 3 appear twice.
assert Solution().findDuplicates([4,3,2,7,8,2,3,1]) == [2,3]
| 40.40625 | 79 | 0.608662 |
66776ed63d7e38eb38a9559cc44798e48137c63c | 10,519 | py | Python | napari/_vispy/experimental/tiled_image_visual.py | harripj/napari | 7a284b1efeb14b1f812f0d98c608f70f0dd66ad2 | [
"BSD-3-Clause"
] | null | null | null | napari/_vispy/experimental/tiled_image_visual.py | harripj/napari | 7a284b1efeb14b1f812f0d98c608f70f0dd66ad2 | [
"BSD-3-Clause"
] | null | null | null | napari/_vispy/experimental/tiled_image_visual.py | harripj/napari | 7a284b1efeb14b1f812f0d98c608f70f0dd66ad2 | [
"BSD-3-Clause"
] | null | null | null | """TiledImageVisual class
A visual that draws tiles using a texture atlas.
"""
from typing import List, Set
import numpy as np
from ...layers.image.experimental.octree_util import OctreeChunk
from ..vendored import ImageVisual
from ..vendored.image import _build_color_transform
from .texture_atlas import TextureAtlas2D
from .tile_set import TileSet
# Shape of the whole texture atlas, measured in tiles (not pixels). Hardcoded for now.
SHAPE_IN_TILES = (16, 16)
    def add_one_tile(self, octree_chunk: OctreeChunk) -> None:
        """Add one tile to the tiled image.

        If the texture atlas has no free slot the chunk is silently
        dropped. Returns None (the previous docstring wrongly claimed
        an int index was returned).

        Parameters
        ----------
        octree_chunk : OctreeChunk
            The data for the tile we are adding.
        """
        atlas_tile = self._texture_atlas.add_tile(octree_chunk)
        if atlas_tile is None:
            return  # No slot available in the atlas.
        # Record the chunk -> atlas-slot mapping; vertices are rebuilt
        # lazily on the next draw.
        self._tiles.add(octree_chunk, atlas_tile)
        self._need_vertex_update = True
def remove_tile(self, tile_index: int) -> None:
"""Remove one tile from the image.
Parameters
----------
tile_index : int
The tile to remove.
"""
try:
self._tiles.remove(tile_index)
self._texture_atlas.remove_tile(tile_index)
self._need_vertex_update = True
except IndexError:
raise RuntimeError(f"Tile index {tile_index} not found.")
def prune_tiles(self, visible_set: Set[OctreeChunk]) -> None:
"""Remove tiles that are not part of the given visible set.
visible_set : Set[OctreeChunk]
The set of currently visible chunks.
"""
for tile_data in list(self._tiles.tile_data):
if tile_data.octree_chunk.key not in visible_set:
tile_index = tile_data.atlas_tile.index
self.remove_tile(tile_index)
def _build_vertex_data(self) -> None:
"""Build vertex and texture coordinate buffers.
This overrides ImageVisual._build_vertex_data(), it is called from
our _prepare_draw().
This is the heart of tiled rendering. Instead of drawing one quad
with one texture, we draw one quad per tile. And for each quad its
texture coordinates will pull from the right slot in the atlas.
So as the card draws the tiles, where it's sampling from the
texture will hop around in the atlas texture.
"""
if len(self._tiles) == 0:
return # Nothing to draw.
verts = np.zeros((0, 2), dtype=np.float32)
tex_coords = np.zeros((0, 2), dtype=np.float32)
# TODO_OCTREE: We can probably avoid vstack here if clever,
# maybe one one vertex buffer sized according to the max
# number of tiles we expect. But grow if needed.
for tile_data in self._tiles.tile_data:
tile = tile_data.atlas_tile
verts = np.vstack((verts, tile.verts))
tex_coords = np.vstack((tex_coords, tile.tex_coords))
# Set the base ImageVisual _subdiv_ buffers
self._subdiv_position.set_data(verts)
self._subdiv_texcoord.set_data(tex_coords)
self._need_vertex_update = False
    def _build_texture(self) -> None:
        """Override of ImageVisual._build_texture().

        Currently a stub: contrast limits are pinned to [0, 1] rather
        than derived from the data, and the flags are flipped so the
        color transform is rebuilt while no re-upload is requested.

        TODO_OCTREE: This needs work. Need to do the clim stuff in the
        base ImageVisual._build_texture but do it for each tile?
        """
        self._clim = np.array([0, 1])
        self._texture_limits = np.array([0, 1])  # hardcode
        self._need_colortransform_update = True
        self._need_texture_upload = False
    def _prepare_draw(self, view) -> None:
        """Override of ImageVisual._prepare_draw()

        Refreshes interpolation, texture, color transform and vertex
        state when the corresponding dirty flags are set, then defers
        to the base update machinery.

        Parameters
        ----------
        view
            The vispy view being drawn.

        TODO_OCTREE: See how much this changes from base class, if we can
        avoid too much duplication. Or factor out some common methods.
        """
        if self._need_interpolation_update:
            # Call the base ImageVisual._build_interpolation()
            self._build_interpolation()
            # But override to use our texture atlas.
            self._data_lookup_fn['texture'] = self._texture_atlas
        # We call our own _build_texture
        if self._need_texture_upload:
            self._build_texture()
        # TODO_OCTREE: how does colortransform change for tiled?
        if self._need_colortransform_update:
            prg = view.view_program
            # Single-channel data (2D, or a trailing dim of 1) is grayscale.
            grayscale = len(self.tile_shape) == 2 or self.tile_shape[2] == 1
            self.shared_program.frag[
                'color_transform'
            ] = _build_color_transform(
                grayscale, self.clim_normalized, self.gamma, self.cmap
            )
            self._need_colortransform_update = False
            # Colormaps without a texture LUT get None here.
            prg['texture2D_LUT'] = (
                self.cmap.texture_lut()
                if (hasattr(self.cmap, 'texture_lut'))
                else None
            )
        # We call our own _build_vertex_data()
        if self._need_vertex_update:
            self._build_vertex_data()
        # Call the normal ImageVisual._update_method() unchanged.
        if view._need_method_update:
            self._update_method(view)
| 35.537162 | 79 | 0.64027 |
6683c0d1956dae22490efd4a21cbb16c9e118a7c | 339 | py | Python | tf_prac.py | akapoorx00/machinelearning-stuff | 53184019b77d3387fd15b13d3bfa75529b8ed003 | [
"Apache-2.0"
] | null | null | null | tf_prac.py | akapoorx00/machinelearning-stuff | 53184019b77d3387fd15b13d3bfa75529b8ed003 | [
"Apache-2.0"
] | null | null | null | tf_prac.py | akapoorx00/machinelearning-stuff | 53184019b77d3387fd15b13d3bfa75529b8ed003 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
# Legacy TensorFlow 1.x graph-mode demo (tf.Session / global_variables_initializer
# were removed in TF 2.x).
x = tf.constant(35, name='x')
print(x)
# y depends on x; its value is only materialised when run in a session.
y = tf.Variable(x+5, name='y')
with tf.Session() as session:
    # NOTE(review): `merged` is never used below — no summaries are defined.
    merged = tf.summary.merge_all()
    # Dump the graph for TensorBoard into ./output.
    writer = tf.summary.FileWriter("output", session.graph)
    model = tf.global_variables_initializer()
    session.run(model)
    print (session.run(y))
| 21.1875 | 59 | 0.672566 |
6683d7523bb35e6eea7af58dcc94e299c8b5221f | 523 | py | Python | patterns/adapter/app.py | mattskone/head-first-design-patterns | 3f0d3a5c39475b418f09e2c45505f88fa673dd41 | [
"MIT"
] | null | null | null | patterns/adapter/app.py | mattskone/head-first-design-patterns | 3f0d3a5c39475b418f09e2c45505f88fa673dd41 | [
"MIT"
] | 1 | 2015-01-13T17:19:19.000Z | 2015-03-11T16:02:28.000Z | patterns/adapter/app.py | mattskone/head-first-design-patterns | 3f0d3a5c39475b418f09e2c45505f88fa673dd41 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from implementations import MallardDuck, WildTurkey, TurkeyAdapter
# Demo driver for the Adapter pattern (Python 2 print-statement syntax).
if __name__ == '__main__':
    # A duck exposes quack()/fly() directly.
    d = MallardDuck()
    print '\nThe Duck says...'
    d.quack()
    d.fly()
    # A turkey has a different interface: gobble()/fly().
    t = WildTurkey()
    print '\nThe Turkey says...'
    t.gobble()
    t.fly()
    # Now we use the adapter to show how a Turkey can be made to
    # behave like a Duck (expose the same methods, and fly the same
    # distance):
    td = TurkeyAdapter(t)
    print '\nThe TurkeyAdapter says...'
    td.quack()
td.fly() | 23.772727 | 67 | 0.625239 |
66849fe8ffb1c558532c4307c57805110b8abc4c | 134 | py | Python | app/config/task.py | atulmishra-one/dairy_management_portal | a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | [
"MIT"
] | 2 | 2020-08-02T10:06:19.000Z | 2022-03-29T06:10:57.000Z | app/config/task.py | atulmishra-one/dairy_management_portal | a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | [
"MIT"
] | null | null | null | app/config/task.py | atulmishra-one/dairy_management_portal | a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | [
"MIT"
] | 2 | 2019-02-03T15:44:02.000Z | 2021-03-09T07:30:28.000Z | CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_IMPORTS=('app.users.tasks') | 26.8 | 50 | 0.768657 |
6684d6354c57bdba0d562fbf5c959a7bb01edb22 | 5,697 | py | Python | GCR.py | goodot/character-recognition | 71cd3664670ec2d672d344e8b1842ce3c3ff47d5 | [
"Apache-2.0"
] | 1 | 2019-04-25T10:34:21.000Z | 2019-04-25T10:34:21.000Z | GCR.py | goodot/character-recognition | 71cd3664670ec2d672d344e8b1842ce3c3ff47d5 | [
"Apache-2.0"
] | null | null | null | GCR.py | goodot/character-recognition | 71cd3664670ec2d672d344e8b1842ce3c3ff47d5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from PIL import Image
from numpy import array
import sqlite3
import tkMessageBox
import matplotlib.pyplot as plt
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.structure.modules import TanhLayer
from pybrain.structure.modules import SigmoidLayer
# global db, x, dimage, image,alphabet
alphabet = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j',
10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's',
19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
def getcharkey(char, table=None):
    """Return the numeric key mapped to ``char``, or None if absent.

    Parameters
    ----------
    char : text
        The character to look up.
    table : dict, optional
        Mapping of key -> character; defaults to the module-level
        ``alphabet`` table.

    Returns
    -------
    int or None
        The first key whose value equals ``char``; None when not found.
    """
    if table is None:
        table = alphabet
    # dict.items() works on both Python 2 and 3 (iteritems() was
    # Python-2-only and broke this module under Python 3).
    for key, ch in table.items():
        # Values may be byte strings in legacy tables; normalise to
        # text before comparing instead of unconditionally decoding.
        if isinstance(ch, bytes):
            ch = ch.decode('utf-8')
        if ch == char:
            return key
    return None
# Module start-up at import time (init() is defined elsewhere in this file).
init()
| 22.429134 | 110 | 0.566438 |
6686b772848e4502d8bad3bd405870762f442216 | 2,966 | py | Python | grano/logic/projects.py | ANCIR/grano | cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | [
"MIT"
] | 30 | 2018-08-23T15:42:17.000Z | 2021-11-16T13:11:36.000Z | grano/logic/projects.py | ANCIR/grano | cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | [
"MIT"
] | null | null | null | grano/logic/projects.py | ANCIR/grano | cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | [
"MIT"
] | 5 | 2019-05-30T11:36:53.000Z | 2021-08-11T16:17:14.000Z | import colander
from datetime import datetime
from grano.core import app, db, celery
from grano.logic.validation import database_name
from grano.logic.references import AccountRef
from grano.plugins import notify_plugins
from grano.model import Project
def save(data, project=None):
    """ Create or update a project with a given slug.

    ``data`` is validated first. When ``project`` is None a new Project
    row is created and its slug assigned; otherwise the existing row is
    updated in place (the slug is never reassigned). The author is
    granted admin permission, the base schema fixture is imported, and
    ``_project_changed`` is fired with 'create' or 'update'.
    """
    data = validate(data, project)
    # Decide the event name before we (possibly) create the row below.
    operation = 'create' if project is None else 'update'
    if project is None:
        project = Project()
        # Slug is only assigned for newly created projects.
        project.slug = data.get('slug')
    project.author = data.get('author')
    # Imported locally — presumably to avoid a circular import with
    # grano.logic; confirm before moving to module level.
    from grano.logic import permissions as permissions_logic
    # The author always receives admin rights on the project.
    permissions_logic.save({
        'account': data.get('author'),
        'project': project,
        'admin': True
    })
    project.settings = data.get('settings')
    project.label = data.get('label')
    project.private = data.get('private')
    project.updated_at = datetime.utcnow()
    db.session.add(project)
    # TODO: make this nicer - separate files?
    # Every project starts out with the base schema fixture.
    from grano.logic.schemata import import_schema
    with app.open_resource('fixtures/base.yaml') as fh:
        import_schema(project, fh)
    # Flush pending state so listeners observe the persisted project.
    db.session.flush()
    _project_changed(project.slug, operation)
    return project
def delete(project):
    """ Delete the project and all related data. """
    # Notify change listeners first, while the slug is still readable.
    _project_changed(project.slug, 'delete')
    db.session.delete(project)
def truncate(project):
    """ Delete all entities and relations from this project,
    but leave the project, schemata and attributes intact. """
    from grano.logic import relations, entities
    project.updated_at = datetime.utcnow()
    # Relations go first, then the entities they connect.
    for rel in project.relations:
        relations.delete(rel)
    for ent in project.entities:
        entities.delete(ent)
| 31.892473 | 76 | 0.650371 |
6686c68bcf9dc01f99b52c42230df5b834e570c1 | 63 | py | Python | code/yahoo_procon2019_qual_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/yahoo_procon2019_qual_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/yahoo_procon2019_qual_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | N,K=map(int,input().split())
print("YES" if N>=2*K-1 else "NO") | 31.5 | 34 | 0.603175 |
668a69950d894c5be476b21543db749add8b52d5 | 180 | py | Python | allauth/socialaccount/providers/pivotaltracker/urls.py | rawjam/django-allauth | 2daa33178aa1ab749581c494f4c39e1c72ad5c7b | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/pivotaltracker/urls.py | rawjam/django-allauth | 2daa33178aa1ab749581c494f4c39e1c72ad5c7b | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/pivotaltracker/urls.py | rawjam/django-allauth | 2daa33178aa1ab749581c494f4c39e1c72ad5c7b | [
"MIT"
] | null | null | null | from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from provider import PivotalTrackerProvider
urlpatterns = default_urlpatterns(PivotalTrackerProvider)
| 30 | 75 | 0.888889 |
668cea27bdbc4f6209d2380260dbf5312ca4bad1 | 2,944 | py | Python | Dorta/sales_modification/wizard/sale_order_popup.py | aaparicio87/Odoo12 | 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | [
"MIT"
] | null | null | null | Dorta/sales_modification/wizard/sale_order_popup.py | aaparicio87/Odoo12 | 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | [
"MIT"
] | null | null | null | Dorta/sales_modification/wizard/sale_order_popup.py | aaparicio87/Odoo12 | 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | [
"MIT"
] | null | null | null | from odoo import fields, models, api, _
from odoo.exceptions import UserError
| 42.057143 | 116 | 0.567935 |
668da6a3dfe98b38ca927b8c9945a7980761c6b8 | 830 | py | Python | tyson-py/udp-echo.py | asheraryam/tyson | 44317a4e3367ef4958c3bb8d3ad538a3908a4566 | [
"MIT"
] | null | null | null | tyson-py/udp-echo.py | asheraryam/tyson | 44317a4e3367ef4958c3bb8d3ad538a3908a4566 | [
"MIT"
] | null | null | null | tyson-py/udp-echo.py | asheraryam/tyson | 44317a4e3367ef4958c3bb8d3ad538a3908a4566 | [
"MIT"
] | null | null | null | """UDP hole punching server."""
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import sys
# Port used when none is supplied on the command line.
DEFAULT_PORT = 4000
if __name__ == '__main__':
    if len(sys.argv) < 2:
        # No port argument: print usage but fall back to the default
        # instead of exiting (the exit is deliberately commented out).
        print("Usage: ./server.py PORT")
        port = DEFAULT_PORT
        # sys.exit(1)
    else:
        port = int(sys.argv[1])
    # ServerProtocol (a DatagramProtocol) is defined elsewhere in this file.
    reactor.listenUDP(port, ServerProtocol())
    print('Listening on *:%d' % (port))
reactor.run() | 28.62069 | 73 | 0.631325 |
668e417b3a6306ecd6bbd0fcf013eefd855c3921 | 12,972 | py | Python | src/fhir_types/FHIR_StructureMap_Source.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | 2 | 2022-02-03T00:51:30.000Z | 2022-02-03T18:42:43.000Z | src/fhir_types/FHIR_StructureMap_Source.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | src/fhir_types/FHIR_StructureMap_Source.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Literal, TypedDict
from .FHIR_Address import FHIR_Address
from .FHIR_Age import FHIR_Age
from .FHIR_Annotation import FHIR_Annotation
from .FHIR_Attachment import FHIR_Attachment
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Coding import FHIR_Coding
from .FHIR_ContactDetail import FHIR_ContactDetail
from .FHIR_ContactPoint import FHIR_ContactPoint
from .FHIR_Contributor import FHIR_Contributor
from .FHIR_Count import FHIR_Count
from .FHIR_DataRequirement import FHIR_DataRequirement
from .FHIR_Distance import FHIR_Distance
from .FHIR_Dosage import FHIR_Dosage
from .FHIR_Duration import FHIR_Duration
from .FHIR_Element import FHIR_Element
from .FHIR_Expression import FHIR_Expression
from .FHIR_HumanName import FHIR_HumanName
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_integer import FHIR_integer
from .FHIR_Meta import FHIR_Meta
from .FHIR_Money import FHIR_Money
from .FHIR_ParameterDefinition import FHIR_ParameterDefinition
from .FHIR_Period import FHIR_Period
from .FHIR_Quantity import FHIR_Quantity
from .FHIR_Range import FHIR_Range
from .FHIR_Ratio import FHIR_Ratio
from .FHIR_Reference import FHIR_Reference
from .FHIR_RelatedArtifact import FHIR_RelatedArtifact
from .FHIR_SampledData import FHIR_SampledData
from .FHIR_Signature import FHIR_Signature
from .FHIR_string import FHIR_string
from .FHIR_Timing import FHIR_Timing
from .FHIR_TriggerDefinition import FHIR_TriggerDefinition
from .FHIR_UsageContext import FHIR_UsageContext
# A Map of relationships between 2 structures that can be used to transform data.
FHIR_StructureMap_Source = TypedDict(
"FHIR_StructureMap_Source",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Type or variable this rule applies to.
"context": FHIR_id,
# Extensions for context
"_context": FHIR_Element,
# Specified minimum cardinality for the element. This is optional; if present, it acts an implicit check on the input content.
"min": FHIR_integer,
# Extensions for min
"_min": FHIR_Element,
# Specified maximum cardinality for the element - a number or a "*". This is optional; if present, it acts an implicit check on the input content (* just serves as documentation; it's the default value).
"max": FHIR_string,
# Extensions for max
"_max": FHIR_Element,
# Specified type for the element. This works as a condition on the mapping - use for polymorphic elements.
"type": FHIR_string,
# Extensions for type
"_type": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueBase64Binary": str,
# Extensions for defaultValueBase64Binary
"_defaultValueBase64Binary": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueBoolean": bool,
# Extensions for defaultValueBoolean
"_defaultValueBoolean": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueCanonical": str,
# Extensions for defaultValueCanonical
"_defaultValueCanonical": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueCode": str,
# Extensions for defaultValueCode
"_defaultValueCode": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDate": str,
# Extensions for defaultValueDate
"_defaultValueDate": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDateTime": str,
# Extensions for defaultValueDateTime
"_defaultValueDateTime": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDecimal": float,
# Extensions for defaultValueDecimal
"_defaultValueDecimal": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueId": str,
# Extensions for defaultValueId
"_defaultValueId": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueInstant": str,
# Extensions for defaultValueInstant
"_defaultValueInstant": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueInteger": float,
# Extensions for defaultValueInteger
"_defaultValueInteger": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueMarkdown": str,
# Extensions for defaultValueMarkdown
"_defaultValueMarkdown": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueOid": str,
# Extensions for defaultValueOid
"_defaultValueOid": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValuePositiveInt": float,
# Extensions for defaultValuePositiveInt
"_defaultValuePositiveInt": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueString": str,
# Extensions for defaultValueString
"_defaultValueString": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueTime": str,
# Extensions for defaultValueTime
"_defaultValueTime": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUnsignedInt": float,
# Extensions for defaultValueUnsignedInt
"_defaultValueUnsignedInt": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUri": str,
# Extensions for defaultValueUri
"_defaultValueUri": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUrl": str,
# Extensions for defaultValueUrl
"_defaultValueUrl": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUuid": str,
# Extensions for defaultValueUuid
"_defaultValueUuid": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueAddress": FHIR_Address,
# A value to use if there is no existing value in the source object.
"defaultValueAge": FHIR_Age,
# A value to use if there is no existing value in the source object.
"defaultValueAnnotation": FHIR_Annotation,
# A value to use if there is no existing value in the source object.
"defaultValueAttachment": FHIR_Attachment,
# A value to use if there is no existing value in the source object.
"defaultValueCodeableConcept": FHIR_CodeableConcept,
# A value to use if there is no existing value in the source object.
"defaultValueCoding": FHIR_Coding,
# A value to use if there is no existing value in the source object.
"defaultValueContactPoint": FHIR_ContactPoint,
# A value to use if there is no existing value in the source object.
"defaultValueCount": FHIR_Count,
# A value to use if there is no existing value in the source object.
"defaultValueDistance": FHIR_Distance,
# A value to use if there is no existing value in the source object.
"defaultValueDuration": FHIR_Duration,
# A value to use if there is no existing value in the source object.
"defaultValueHumanName": FHIR_HumanName,
# A value to use if there is no existing value in the source object.
"defaultValueIdentifier": FHIR_Identifier,
# A value to use if there is no existing value in the source object.
"defaultValueMoney": FHIR_Money,
# A value to use if there is no existing value in the source object.
"defaultValuePeriod": FHIR_Period,
# A value to use if there is no existing value in the source object.
"defaultValueQuantity": FHIR_Quantity,
# A value to use if there is no existing value in the source object.
"defaultValueRange": FHIR_Range,
# A value to use if there is no existing value in the source object.
"defaultValueRatio": FHIR_Ratio,
# A value to use if there is no existing value in the source object.
"defaultValueReference": FHIR_Reference,
# A value to use if there is no existing value in the source object.
"defaultValueSampledData": FHIR_SampledData,
# A value to use if there is no existing value in the source object.
"defaultValueSignature": FHIR_Signature,
# A value to use if there is no existing value in the source object.
"defaultValueTiming": FHIR_Timing,
# A value to use if there is no existing value in the source object.
"defaultValueContactDetail": FHIR_ContactDetail,
# A value to use if there is no existing value in the source object.
"defaultValueContributor": FHIR_Contributor,
# A value to use if there is no existing value in the source object.
"defaultValueDataRequirement": FHIR_DataRequirement,
# A value to use if there is no existing value in the source object.
"defaultValueExpression": FHIR_Expression,
# A value to use if there is no existing value in the source object.
"defaultValueParameterDefinition": FHIR_ParameterDefinition,
# A value to use if there is no existing value in the source object.
"defaultValueRelatedArtifact": FHIR_RelatedArtifact,
# A value to use if there is no existing value in the source object.
"defaultValueTriggerDefinition": FHIR_TriggerDefinition,
# A value to use if there is no existing value in the source object.
"defaultValueUsageContext": FHIR_UsageContext,
# A value to use if there is no existing value in the source object.
"defaultValueDosage": FHIR_Dosage,
# A value to use if there is no existing value in the source object.
"defaultValueMeta": FHIR_Meta,
# Optional field for this source.
"element": FHIR_string,
# Extensions for element
"_element": FHIR_Element,
# How to handle the list mode for this element.
"listMode": Literal["first", "not_first", "last", "not_last", "only_one"],
# Extensions for listMode
"_listMode": FHIR_Element,
# Named context for field, if a field is specified.
"variable": FHIR_id,
# Extensions for variable
"_variable": FHIR_Element,
# FHIRPath expression - must be true or the rule does not apply.
"condition": FHIR_string,
# Extensions for condition
"_condition": FHIR_Element,
# FHIRPath expression - must be true or the mapping engine throws an error instead of completing.
"check": FHIR_string,
# Extensions for check
"_check": FHIR_Element,
# A FHIRPath expression which specifies a message to put in the transform log when content matching the source rule is found.
"logMessage": FHIR_string,
# Extensions for logMessage
"_logMessage": FHIR_Element,
},
total=False,
)
| 56.4 | 836 | 0.712458 |
668f3e390bdd48e5a8dc955598a92ec70a35392d | 2,484 | py | Python | ip/ip/ecommerce/views.py | SuryaVamsiKrishna/Inner-Pieces | deb9e83af891dac58966230446a5a32fe10e86f2 | [
"MIT"
] | 1 | 2021-02-17T06:06:50.000Z | 2021-02-17T06:06:50.000Z | ip/ip/ecommerce/views.py | SuryaVamsiKrishna/Inner-Pieces | deb9e83af891dac58966230446a5a32fe10e86f2 | [
"MIT"
] | null | null | null | ip/ip/ecommerce/views.py | SuryaVamsiKrishna/Inner-Pieces | deb9e83af891dac58966230446a5a32fe10e86f2 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import address_form
from django.http import JsonResponse
from .utils import cartData,guestobj
import json,datetime
| 28.883721 | 84 | 0.67029 |
6690a37ed9d0e2c4e7eeabdedc6f1bdca84bc1a4 | 2,899 | py | Python | ecogdata/expconfig/config_decode.py | miketrumpis/ecogdata | ff65820198e69608634c12686a86b97ac3a77558 | [
"BSD-3-Clause"
] | null | null | null | ecogdata/expconfig/config_decode.py | miketrumpis/ecogdata | ff65820198e69608634c12686a86b97ac3a77558 | [
"BSD-3-Clause"
] | null | null | null | ecogdata/expconfig/config_decode.py | miketrumpis/ecogdata | ff65820198e69608634c12686a86b97ac3a77558 | [
"BSD-3-Clause"
] | null | null | null | import os
from ecogdata.util import Bunch
__all__ = ['Parameter', 'TypedParam', 'BoolOrNum', 'NSequence', 'NoneOrStr', 'Path', 'parse_param',
'uniform_bunch_case']
def parse_param(name, command, table):
    """Decode ``command`` using the parameter class registered for ``name``.

    Falls back to the generic ``Parameter`` class when ``table`` has no
    dedicated entry for the (lower-cased) name.
    """
    param_cls = table.get(name.lower(), Parameter)
    parsed = param_cls(command)
    return parsed.value()
def uniform_bunch_case(b):
    """Return a copy of Bunch ``b`` with every string key lower-cased.

    Non-string keys are carried over unchanged.
    """
    lowered = Bunch()
    for key, value in b.items():
        new_key = key.lower() if isinstance(key, str) else key
        lowered[new_key] = value
    return lowered
| 25.883929 | 99 | 0.58089 |
66941e3ed65b1efe5312473285b552d665a56ecc | 29,897 | py | Python | lpjguesstools/lgt_createinput/main.py | lukasbaumbach/lpjguesstools | f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3 | [
"BSD-3-Clause"
] | 2 | 2020-08-03T11:33:00.000Z | 2021-07-05T21:00:46.000Z | lpjguesstools/lgt_createinput/main.py | lukasbaumbach/lpjguesstools | f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3 | [
"BSD-3-Clause"
] | 8 | 2020-08-03T12:45:31.000Z | 2021-02-23T19:51:32.000Z | lpjguesstools/lgt_createinput/main.py | lukasbaumbach/lpjguesstools | f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3 | [
"BSD-3-Clause"
] | 2 | 2020-08-03T12:11:43.000Z | 2022-01-29T10:59:00.000Z | """FILE lgt_createinput.main.py
This script creates condensed LPJ netcdf files
for landforms and soil properties
landforms.nc:
- lfcnt (landid) number of landforms in cell
- frac (landid, lfid/ standid) area fraction this landform represents
- slope (landid, lfid/ standid)
- elevation (landid, lfid/ standid) avg. elevation in this landform
- soildepth (landid, lfid/ standid) [implemented later const in model for now]
sites.nc:
- soildepth
- clay
- silt
- sand
- totc
- elevation (reference elevation for grid, 0.5deg)
Christian Werner, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F)
email: christian.werner@senkenberg.de
2017/02/07
"""
from collections import OrderedDict
import datetime
import glob
import logging
import math
import numpy as np
import os
import pandas as pd
import string
import time
import xarray as xr
from ._geoprocessing import analyze_filename_dem, \
classify_aspect, \
classify_landform, \
calculate_asp_slope, \
compute_spatial_dataset
from ._srtm1 import split_srtm1_dataset
__version__ = "0.0.2"
log = logging.getLogger(__name__)
# import constants
from . import NODATA
from . import ENCODING
# quick helpers
# TODO: move to a dedicated file later
def time_dec(func):
    """A decorator to measure and log the execution time of a function.

    Parameters
    ----------
    func : callable
        The function to wrap.

    Returns
    -------
    callable
        A wrapper that forwards all arguments to ``func``, logs the
        elapsed wall-clock time at debug level, and returns the result.

    Notes
    -----
    The previous version returned an undefined name ``wrapper`` and
    raised NameError as soon as it was applied; the inner function is
    now actually defined.
    """
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        # Debug level so normal runs are not cluttered; lazy %-args.
        logging.getLogger(__name__).debug(
            '%s took %.3f seconds', func.__name__, time.time() - start)
        return result
    # Preserve the wrapped function's identity for introspection.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    return wrapper
# Soil variables written to sites.nc, keyed by their source-table code.
# Tuple layout appears to be (varname, long_name, name, units, scale
# factor) — NOTE(review): inferred from the values, confirm against the
# netCDF writer before relying on it.
varSoil = {'TOTC': ('soc', 'Soil Organic Carbon', 'soc', 'percent', 0.1),
           'SDTO': ('sand', 'Sand', 'sand', 'percent', 1.0),
           'STPC': ('silt', 'Silt', 'silt', 'percent', 1.0),
           'CLPC': ('clay', 'Clay', 'clay', 'percent', 1.0)}
# Landform variables written to landforms.nc; same tuple layout.
varLF = {'lfcnt': ('lfcnt', 'Number of landforms', 'lfcnt', '-', 1.0),
         'slope': ('slope', 'Slope', 'slope', 'deg', 1.0),
         'aspect': ('aspect', 'Aspect', 'aspect', 'deg', 1.0),
         'asp_slope': ('asp_slope', 'Aspect-corrected Slope', 'asp_slope', 'deg', 1.0),
         'fraction': ('fraction', 'Landform Fraction', 'fraction', '1/1', 1.0),
         'elevation': ('elevation', 'Elevation', 'elevation', 'm', 1.0),
         'soildepth': ('soildepth', 'Soil Depth', 'soildepth', 'm', 1.0)
         }
# Sorted name lists give a deterministic iteration order downstream.
soil_vars = sorted(varSoil.keys())
lf_vars = sorted(varLF.keys())
def convert_float_coord_to_string(coord, p=2):
    """Convert a (lon, lat) coordinate to a compact string.

    Example: (-71.25, -33.5) -> 's33.50w071.25'.

    Parameters
    ----------
    coord : sequence of float
        (lon, lat) pair in decimal degrees.
    p : int, optional
        Number of decimal places (default: 2).

    Returns
    -------
    str
        '<n|s><lat><e|w><lon>' with zero-padded fixed-point values.
    """
    lon, lat = round(coord[0], p), round(coord[1], p)
    # Hemisphere letters: n/e for non-negative, s/w for negative values.
    lat_hemi = 's' if lat < 0 else 'n'
    lon_hemi = 'w' if lon < 0 else 'e'
    # Use ``p`` consistently here as well: the old code rounded with
    # ``p`` above but then hard-coded "%.2f"/round(..., 2), so any
    # p != 2 silently produced 2-decimal output anyway.
    lat_s = '%.*f' % (p, abs(lat))
    lon_s = '%.*f' % (p, abs(lon))
    return '%s%s%s%s' % (lat_hemi, lat_s.zfill(p + 3), lon_hemi, lon_s.zfill(p + 4))
def has_significant_land(ds, min_frac=0.01):
    """Test if land fraction in tile is significant.

    The land percentage is the sum of the 'mask' variable over the total
    number of grid cells; min_frac is given in percent.
    """
    n_cells = float(len(ds.lat.values) * len(ds.lon.values))
    land_pct = ds['mask'].values.sum() / n_cells * 100
    return bool(land_pct > min_frac)
def define_landform_classes(step, limit, TYPE='SIMPLE'):
    """Define the landform classes.

    Args:
        step: elevation interval for landform groups (def: 400m)
        limit: elevation limit [inclusive, in m]
        TYPE: classification scheme, 'SIMPLE' or 'WEISS'

    Returns:
        tuple: (list of landform class codes, list of elevation breaks)

    Raises:
        ValueError: if TYPE is not a supported classification scheme.
        (Previously an unknown TYPE fell through to `return lf_full_set`
        with `lf_full_set` unbound, raising an opaque NameError.)
    """
    ele_breaks = [-1000] + list(range(step, limit, step)) + [10000]
    ele_cnt = range(1, len(ele_breaks))
    # code system: [elevation level * 100][slopeid<1..6>][aspectid<0,1..4>]
    #
    # slope:
    #
    #   Name        SIMPLE  WEISS
    #   hilltop       1      1
    #   upper slope          2*
    #   mid slope     3*     3*
    #   flats         4      4
    #   lower slope          5*
    #   valley        6      6
    #
    # aspect (second digit, only for starred slope classes):
    #
    #   Name        SIMPLE  WEISS
    #   north         1      1
    #   east          2      2
    #   south         3      3
    #   west          4      4
    if TYPE == 'WEISS':
        lf_set = [10, 21, 22, 23, 24, 31, 32, 33, 34, 40, 51, 52, 53, 54, 60]
    elif TYPE == 'SIMPLE':
        # TYPE: SIMPLE (1:hilltop, 3:midslope, 4:flat, 6:valley)
        lf_set = [10, 31, 32, 33, 34, 40, 60]
    else:
        log.error('Currently only classifiation schemes WEISS, SIMPLE supported.')
        raise ValueError("Unsupported classification scheme: %r" % TYPE)
    # replicate the base codes once per elevation level (level encoded in
    # the hundreds digit)
    lf_full_set = [x + 100 * e for e in ele_cnt for x in lf_set]
    return (lf_full_set, ele_breaks)
def tiles_already_processed(TILESTORE_PATH):
    """Return the DEM source names of all tiles already in the tile store.

    Each tile netCDF records the DEM it was derived from in its 'source'
    tile attribute; callers use the returned list to skip those DEMs.

    Args:
        TILESTORE_PATH: directory containing previously written tile *.nc files

    Returns:
        list: 'source' attribute values of all tiles that carry one
    """
    existing_tiles = glob.glob(os.path.join(TILESTORE_PATH, '*.nc'))
    processed_tiles = []
    for existing_tile in existing_tiles:
        with xr.open_dataset(existing_tile) as ds:
            source = ds.tile.get('source')
            if source is not None:
                processed_tiles.append(source)
            else:
                # Logger.warn is a deprecated alias; use warning()
                log.warning('Source attr not set in file %s.' % existing_tile)
    return processed_tiles
def match_watermask_shpfile(glob_string):
    """Locate the water-mask shapefile matching *glob_string*.

    Falls back to a .zip archive with the same basename when no .shp file
    matches.  Returns the matching path, or None if nothing matches.
    Exits the program when a pattern is ambiguous (multiple matches).
    """
    def _single_match(pattern):
        # glob once per pattern (the original code globbed up to three times)
        matches = glob.glob(pattern)
        if len(matches) == 0:
            return None
        if len(matches) == 1:
            return matches[0]
        log.error("Too many shape files.")
        sys.exit()

    shp = _single_match(glob_string)
    if shp is None:
        # second try: look for zip file
        shp = _single_match(glob_string.replace(".shp", ".zip"))
    return shp
def get_tile_summary(ds, cutoff=0):
    """Compute the fractional cover of the landforms in this tile.

    Builds one row per landform id with its cell count, percentage cover
    (raw and rescaled after the cutoff filter), and per-landform averages
    of elevation, slope, aspect-corrected slope, aspect and (if present)
    soil depth.

    Args:
        ds: tile dataset with 'landform_class' and per-cell variables
        cutoff: minimum percentage cover; smaller landforms are dropped

    Returns:
        pd.DataFrame: one row per retained landform
    """
    unique, counts = np.unique(ds['landform_class'].to_masked_array(), return_counts=True)
    counts = np.ma.masked_array(counts, mask=unique.mask)
    unique = np.ma.compressed(unique)
    counts = np.ma.compressed(counts)
    df = pd.DataFrame({'lf_id': unique.astype('int'), 'cells': counts})
    df['frac'] = (df['cells'] / df['cells'].sum()) * 100
    # drop minor landforms, then rescale fractions over the remaining ones
    df = df[df['frac'] >= cutoff]
    df['frac_scaled'] = (df['cells'] / df['cells'].sum()) * 100
    # placeholder columns, filled per landform below
    for col in ('elevation', 'slope', 'asp_slope', 'aspect'):
        df[col] = -1
    has_soildepth = 'soildepth' in ds.data_vars
    if has_soildepth:
        df['soildepth'] = -1
    a_lf = ds['landform_class'].to_masked_array()
    # calculate the avg. elevation and slope in landforms; aspect angles
    # need circular averaging (avg_aspect)
    # NOTE: the original loop assigned 'soildepth' twice per row; the
    # duplicate assignment has been removed (identical value).
    for i, r in df.iterrows():
        ix = a_lf == int(r['lf_id'])
        df.loc[i, 'slope'] = ds['slope'].values[ix].mean()
        df.loc[i, 'asp_slope'] = ds['asp_slope'].values[ix].mean()
        df.loc[i, 'elevation'] = ds['elevation'].values[ix].mean()
        df.loc[i, 'aspect'] = avg_aspect(ds['aspect'].values[ix])
        if has_soildepth:
            df.loc[i, 'soildepth'] = ds['soildepth'].values[ix].mean()
    return df
def tile_files_compatible(files):
    """Get global attribute from all tile netcdf files and check
    they were created with an identical elevation step and classification.

    Returns True only when every file carries the same, fully populated
    (elevation_step, classification) fingerprint.
    """
    fingerprints = []
    for file in files:
        with xr.open_dataset(file) as ds:
            fingerprint = (ds.tile.get('elevation_step'), ds.tile.get('classification'))
            fingerprints.append(fingerprint)
    # check if elements are equal
    if not all(x == fingerprints[0] for x in fingerprints):
        return False
    # check if there are Nones in any fingerprint
    # (the original `all(fingerprints)` only tested tuple truthiness, so a
    # (None, None) fingerprint slipped through -- non-empty tuples are
    # always truthy)
    if any(None in fp for fp in fingerprints):
        return False
    return True
def create_stats_table(df, var):
    """Create a landform info table for all coords and given var.

    Pivots the landform index level of *df* into one 'lf<ID>' column per
    landform and expands the coord index into lon/lat/lf_cnt columns.

    Args:
        df: frame indexed by (coord, lf_id); 'coord' entries appear to be
            (lon, lat, lf_cnt) tuples -- TODO confirm against caller.
        var: column of *df* to pivot.

    Returns:
        pd.DataFrame with columns [lon, lat, lf_cnt, lf<ID>, ...].
    """
    # pivot landform ids into columns; absent landforms become NODATA
    df_ = df[var].unstack(level=-1, fill_value=NODATA)
    # rename columns and split coord tuple col to lon and lat col
    df_.columns = ['lf' + str(col) for col in df_.columns]
    # lf0 is a dummy/fill landform id -- presumably not a real class; drop it
    if 'lf0' in df_.columns:
        del df_['lf0']
    df_ = df_.reset_index()
    # expand the coord tuple into three scalar columns
    df_[['lon', 'lat', 'lf_cnt']] = df_['coord'].apply(pd.Series)
    df_['lf_cnt'] = df_['lf_cnt'].astype(int)
    # cleanup (move lon, lat to front, drop coord col)
    df_.drop('coord', axis=1, inplace=True)
    latloncnt_cols = ['lon', 'lat', 'lf_cnt']
    new_col_order = latloncnt_cols + \
        [x for x in df_.columns.tolist() if x not in latloncnt_cols]
    return df_[new_col_order]
def is_3d(ds, v):
    """Check if xr.DataArray has 3 dimensions."""
    return len(ds[v].dims) == 3
def assign_to_dataarray(data, df, lf_full_set, refdata=False):
    """Place value into correct location of data array.

    Args:
        data (xr.DataArray): target array, filled in place and returned.
        df (pd.DataFrame): rows with lon, lat, lf_cnt followed by one
            'lf<ID>' column per landform.
        lf_full_set (list): all valid landform ids (defines lf_id ordering).
        refdata (bool): if True, write the scalar lf_cnt per grid cell;
            otherwise write per-landform values along the lf_id dimension.

    Returns:
        xr.DataArray: the filled *data* array.
    """
    # initialize: NODATA for the 2D reference grid, NaN for 3D stacks
    data[:] = NODATA if refdata else np.nan
    for _, r in df.iterrows():
        if refdata:
            data.loc[r.lat, r.lon] = r.lf_cnt
        else:
            # columns after (lon, lat, lf_cnt) are named 'lf<ID>'
            for lf in r.index[3:]:
                if r[lf] > NODATA:
                    lf_id = int(lf[2:])
                    # (the original also computed lf_full_set.index(lf_id)
                    # into an unused variable; removed)
                    data.loc[dict(lf_id=lf_id, lat=r.lat, lon=r.lon)] = r[lf]
    return data
def spatialclip_df(df, extent):
    """Clip dataframe with lat lon columns by extent.

    Args:
        df: frame with 'lon' and 'lat' columns.
        extent: (lon1, lat1, lon2, lat2) bounding box.

    Returns:
        pd.DataFrame: rows inside the extent; the unclipped *df* when the
        extent contains None or the lon/lat columns are missing (the
        original warned but then crashed on the comparison/lookup).
    """
    if any(e is None for e in extent):
        log.warning("SpatialClip: extent passed is None.")
        return df
    if ('lon' not in df.columns) or ('lat' not in df.columns):
        log.warning("SpatialClip: lat/ lon cloumn missing in df.")
        return df
    lon1, lat1, lon2, lat2 = extent
    return df[((df.lon >= lon1) & (df.lon <= lon2)) &
              ((df.lat >= lat1) & (df.lat <= lat2))]
def build_site_netcdf(soilref, elevref, extent=None):
    """Build the site netcdf file.

    Reads the reference soil and elevation netCDF files, optionally clips
    them to *extent*, gap-fills soil cells that have elevation but no soil
    data using horizontal neighbors, and assembles the output dataset with
    the converted soil variables plus elevation.

    Args:
        soilref: path to the soil reference netCDF (0.5 deg grid).
        elevref: path to the elevation reference netCDF.
        extent: optional (x1, y1, x2, y2) = (lon_min, lat_min, lon_max, lat_max).

    Returns:
        xr.Dataset: soil variables (renamed/converted via varSoil) + elevation.
    """
    # extent: (x1, y1, x2, y2)
    ds_soil_orig = xr.open_dataset(soilref)
    ds_ele_orig = xr.open_dataset(elevref)
    if extent is not None:
        lat_min, lat_max = extent[1], extent[3]
        lon_min, lon_max = extent[0], extent[2]
        # slice simulation domain; only the top soil level (lev == 1.0) is used
        ds_soil = ds_soil_orig.where((ds_soil_orig.lon >= lon_min) & (ds_soil_orig.lon <= lon_max) &
                                     (ds_soil_orig.lat >= lat_min) & (ds_soil_orig.lat <= lat_max) &
                                     (ds_soil_orig.lev==1.0), drop=True).squeeze(drop=True)
        ds_ele = ds_ele_orig.where((ds_ele_orig.longitude >= lon_min) & (ds_ele_orig.longitude <= lon_max) &
                                   (ds_ele_orig.latitude >= lat_min) & (ds_ele_orig.latitude <= lat_max), drop=True).squeeze(drop=True)
    else:
        ds_soil = ds_soil_orig.sel(lev=1.0).squeeze(drop=True)
        ds_ele = ds_ele_orig.squeeze(drop=True)
    del ds_soil['lev']
    # identify locations that need filling and use left neighbor
    # (1 = missing in the masks below)
    smask = np.where(ds_soil['TOTC'].to_masked_array().mask, 1, 0)
    emask = np.where(ds_ele['data'].to_masked_array().mask, 1, 0)
    # no soil data but elevation: gap-fill with neighbors
    missing = np.where((smask == 1) & (emask == 0), 1, 0)
    ix, jx = np.where(missing == 1)
    if len(ix) > 0:
        log.debug('Cells with elevation but no soil data [BEFORE GF: %d].' % len(ix))
        for i, j in zip(ix, jx):
            for v in soil_vars:
                # prefer the west neighbor, fall back to the east neighbor
                if (j > 0) and np.isfinite(ds_soil[v][i, j-1]):
                    ds_soil[v][i, j] = ds_soil[v][i, j-1].copy(deep=True)
                elif (j < ds_soil[v].shape[1]-1) and np.isfinite(ds_soil[v][i, j+1]):
                    ds_soil[v][i, j] = ds_soil[v][i, j+1].copy(deep=True)
                else:
                    log.warn('neighbours have nodata !')
                    # NOTE(review): `x` below is never used -- looks like a
                    # leftover from debugging; confirm before removing.
                    x = ds_soil[v][i, j].to_masked_array()
    # recount the remaining gaps after gap-filling (diagnostics only)
    smask2 = np.where(ds_soil['TOTC'].to_masked_array().mask, 1, 0)
    missing = np.where((smask2 == 1) & (emask == 0), 1, 0)
    ix, jx = np.where(missing == 1)
    log.debug('Cells with elevation but no soil data [AFTER GF: %d].' % len(ix))
    dsout = xr.Dataset()
    # soil vars: apply the per-variable conversion factor, rename, attach
    # CF-style attributes and mask where elevation is missing
    for v in soil_vars:
        conv = varSoil[v][-1]
        da = ds_soil[v].copy(deep=True) * conv
        da.name = varSoil[v][0]
        vattr = {'name': varSoil[v][0],
                 'long_name': varSoil[v][1],
                 'standard_name': varSoil[v][2],
                 'units': varSoil[v][3],
                 'coordinates': "lat lon"}
        da.tile.update_attrs(vattr)
        da.tile.update_encoding(ENCODING)
        da[:] = np.ma.masked_where(emask, da.to_masked_array())
        dsout[da.name] = da
    # ele var: reuse the last soil array's shape/coords as a template
    da = xr.full_like(da.copy(deep=True), np.nan)
    da.name = 'elevation'
    vattr = {'name': 'elevation', 'long_name': 'Elevation',
             'units': 'meters', 'standard_name': 'elevation'}
    da.tile.update_attrs(vattr)
    da.tile.update_encoding(ENCODING)
    da[:] = ds_ele['data'].to_masked_array()
    dsout[da.name] = da
    return dsout
def build_compressed(ds):
    """Build LPJ-Guess 4.0 compatible compressed netcdf file.

    Converts a 2D (lat, lon) dataset into a 1D dataset indexed by a
    consecutive 'land_id', keeping only valid (non-NODATA) land cells.

    Args:
        ds: landforms dataset (has 'lfcnt') or site dataset (has 'elevation').

    Returns:
        tuple: (2D land_id reference dataset, compressed 1D dataset)

    Raises:
        ValueError: if *ds* is neither a landforms nor a site dataset.
        (Previously this fell through with `v` unbound and raised NameError.)
    """
    # identify landforms netcdf; the chosen variable defines the land mask
    if 'lfcnt' in ds.data_vars:
        v = 'lfcnt'
    elif 'elevation' in ds.data_vars:
        v = 'elevation'
    else:
        log.error("Not a valid xr.Dataset (landforms or site only).")
        raise ValueError("Not a valid xr.Dataset (landforms or site only).")
    # create id position dataarray
    da_ids = xr.ones_like(ds[v]) * NODATA
    latL = []
    lonL = []
    d = ds[v].to_masked_array()
    # REVIEW: why is 'to_masked_array()' not working here?
    d = np.ma.masked_where(d == NODATA, d)
    land_id = 0
    D_ids = OrderedDict()
    # walk the grid bottom row up, assigning consecutive ids to valid cells
    for j in reversed(range(len(d))):
        for i in range(len(d[0])):
            if d[j, i] is not np.ma.masked:
                lat = float(ds['lat'][j].values)
                lon = float(ds['lon'][i].values)
                latL.append(lat)
                lonL.append(lon)
                da_ids.loc[lat, lon] = land_id
                D_ids[(lat, lon)] = land_id
                land_id += 1
    LFIDS = range(land_id)
    # create coordinate variables
    lats = xr.DataArray(latL, name='lat', coords=[('land_id', LFIDS)])
    lons = xr.DataArray(lonL, name='lon', coords=[('land_id', LFIDS)])
    lats.tile.update_attrs(dict(standard_name='latitude',
                                long_name='latitude',
                                units='degrees_north'))
    lons.tile.update_attrs(dict(standard_name='longitude',
                                long_name='longitude',
                                units='degrees_east'))
    # create land_id reference array
    # TODO: clip land_id array to Chile country extent?
    da_ids.tile.update_encoding(ENCODING)
    ds_ids = da_ids.to_dataset(name='land_id')
    # create xr.Dataset
    dsout = xr.Dataset()
    dsout[lats.name] = lats
    dsout[lons.name] = lons
    # walk through variables, get lat/ lon cells' data
    for v in ds.data_vars:
        if is_3d(ds, v):
            _shape = (len(LFIDS), len(ds[ds[v].dims[0]]))
            COORDS = [('land_id', LFIDS), ('lf_id', ds['lf_id'])]
        else:
            _shape = (len(LFIDS),)
            COORDS = [('land_id', LFIDS)]
        _blank = np.ones(_shape)
        _da = xr.DataArray(_blank[:], name=v, coords=COORDS)
        for lat, lon in zip(latL, lonL):
            land_id = D_ids[(lat, lon)]
            vals = ds[v].sel(lat=lat, lon=lon).to_masked_array()
            _da.loc[land_id] = vals
        _da.tile.update_attrs(ds[v].attrs)
        _da.tile.update_encoding(ENCODING)
        dsout[_da.name] = _da
        if is_3d(ds, v):
            dsout['lf_id'].tile.update_attrs(dict(standard_name='lf_id',
                                                  long_name='lf_id',
                                                  units='-'))
    # copy lgt attributes from src to dst
    dsout.tile.copy_attrs(ds)
    return (ds_ids, dsout)
def mask_dataset(ds, valid):
    """Mask all values that are not valid/ 1 (2d or 3d).

    The 2D *valid* mask is broadcast along the leading dimension for 3D
    variables.  Cells where valid == 0 are replaced by NODATA in place.

    Fix: the original permanently overwrote *valid* with its 3D broadcast
    after the first 3D variable, so any 2D variable processed afterwards
    was compared against a mask of the wrong shape.
    """
    for v in ds.data_vars:
        var_mask = valid
        if len(ds[v].dims) > len(valid.shape):
            # per-variable broadcast; does not mutate the shared 2D mask
            var_mask = np.broadcast_to(valid, ds[v].values.shape)
        ds[v].values = np.ma.masked_where(var_mask == 0, ds[v].values).filled(NODATA)
    return ds
def create_gridlist(ds):
    """Create LPJ-Guess 4.0 gridlist file.

    Walks the land_id grid bottom row up and emits one
    "<lat> <lon> <land_id>" line per valid cell.
    """
    lines = []
    n_rows = len(ds['land_id'])
    n_cols = len(ds['land_id'][0])
    for j in reversed(range(n_rows)):
        for i in range(n_cols):
            cell = ds['land_id'][j, i].values
            if cell == NODATA:
                continue
            lat = float(ds['lat'][j].values)
            lon = float(ds['lon'][i].values)
            land_id = int(ds['land_id'].sel(lat=lat, lon=lon).values)
            lines.append("%3.2f %3.2f %d" % (lat, lon, land_id))
    return '\n'.join(lines) + '\n'
def main(cfg):
    """Main Script.

    Drives the full pipeline: converts DEM files to landform tiles,
    aggregates tile statistics, builds the 2D site and landform netCDFs,
    compresses them to land_id-indexed files and writes the LPJ-Guess
    gridlist.

    Args:
        cfg: configuration object providing CLASSIFICATION, REGION, OUTDIR
            and GRIDLIST_TXT -- assumed interface; confirm against caller.
    """
    # default soil and elevation data (contained in package)
    import pkg_resources
    SOIL_NC = pkg_resources.resource_filename(__name__, '../data/GLOBAL_WISESOIL_DOM_05deg.nc')
    ELEVATION_NC = pkg_resources.resource_filename(__name__, '../data/GLOBAL_ELEVATION_05deg.nc')
    log.info("Converting DEM files and computing landform stats")
    # define the final landform classes (now with elevation brackets)
    # 200 m elevation steps up to 6000 m
    lf_classes, lf_ele_levels = define_landform_classes(200, 6000, TYPE=cfg.CLASSIFICATION)
    # process dem files to tiles (if not already processed)
    convert_dem_files(cfg, lf_ele_levels)
    #sitenc = build_site_netcdf(SOIL_NC, ELEVATION_NC, extent=cfg.REGION)
    # compute stats from tiles
    df_frac, df_elev, df_slope, df_asp_slope, df_aspect = compute_statistics(cfg)
    #print 'reading files'
    #df_frac = pd.read_csv('lfdata.cutoff_1.0p/df_frac.csv')
    #df_asp_slope = pd.read_csv('lfdata.cutoff_1.0p/df_asp_slope.csv')
    #df_slope = pd.read_csv('lfdata.cutoff_1.0p/df_slope.csv')
    #df_aspect = pd.read_csv('lfdata.cutoff_1.0p/df_aspect.csv')
    #df_elev = pd.read_csv('lfdata.cutoff_1.0p/df_elev.csv')
    # build netcdfs
    log.info("Building 2D netCDF files")
    sitenc = build_site_netcdf(SOIL_NC, ELEVATION_NC, extent=cfg.REGION)
    df_dict = dict(frac_lf=df_frac, elev_lf=df_elev, slope_lf=df_slope,
                   asp_slope_lf=df_asp_slope, aspect_lf=df_aspect)
    landformnc = build_landform_netcdf(lf_classes, df_dict, cfg, lf_ele_levels, refnc=sitenc)
    # clip to joined mask: a cell is kept only when elevation, sand and
    # landform count are all valid
    #elev_mask = np.where(sitenc['elevation'].values == NODATA, 0, 1)
    #landform_mask = np.where(landformnc['lfcnt'].values == NODATA, 0, 1)
    #valid_mask = elev_mask * landform_mask
    elev_mask = ~np.ma.getmaskarray(sitenc['elevation'].to_masked_array())
    sand_mask = ~np.ma.getmaskarray(sitenc['sand'].to_masked_array())
    land_mask = ~np.ma.getmaskarray(landformnc['lfcnt'].to_masked_array())
    valid_mask = elev_mask * sand_mask * land_mask
    sitenc = mask_dataset(sitenc, valid_mask)
    landformnc = mask_dataset(landformnc, valid_mask)
    # NOTE(review): -9999 looks like a hard-coded copy of NODATA -- confirm
    # and replace with the constant if so
    landform_mask = np.where(landformnc['lfcnt'].values == -9999, np.nan, 1)
    #landform_mask = np.where(landform_mask == True, np.nan, 1)
    # propagate the landform mask onto all site variables (NaN outside)
    for v in sitenc.data_vars:
        sitenc[v][:] = sitenc[v].values * landform_mask
    # write 2d/ 3d netcdf files
    sitenc.to_netcdf(os.path.join(cfg.OUTDIR, 'sites_2d.nc'),
                     format='NETCDF4_CLASSIC')
    landformnc.to_netcdf(os.path.join(cfg.OUTDIR, 'landforms_2d.nc'),
                         format='NETCDF4_CLASSIC')
    # convert to compressed netcdf format
    log.info("Building compressed format netCDF files")
    ids_2d, comp_sitenc = build_compressed(sitenc)
    ids_2db, comp_landformnc = build_compressed(landformnc)
    # write netcdf files
    ids_2d.to_netcdf(os.path.join(cfg.OUTDIR, "land_ids_2d.nc"),
                     format='NETCDF4_CLASSIC')
    ids_2db.to_netcdf(os.path.join(cfg.OUTDIR, "land_ids_2db.nc"),
                      format='NETCDF4_CLASSIC')
    comp_landformnc.to_netcdf(os.path.join(cfg.OUTDIR, "landform_data.nc"),
                              format='NETCDF4_CLASSIC')
    comp_sitenc.to_netcdf(os.path.join(cfg.OUTDIR, "site_data.nc"),
                          format='NETCDF4_CLASSIC')
    # gridlist file
    log.info("Creating gridlist file")
    gridlist = create_gridlist(ids_2d)
    # NOTE(review): file handle is never closed -- consider `with open(...)`
    open(os.path.join(cfg.OUTDIR, cfg.GRIDLIST_TXT), 'w').write(gridlist)
    log.info("Done")
| 36.282767 | 135 | 0.596247 |
66942000229050463aff5906c4c70265c74740a1 | 4,379 | py | Python | html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
""" http://www.dns-shop.ru/"""
# # http://stackoverflow.com/a/37755811/5909792
# def get_html(url, check_content_func=None):
# # from PyQt5.QtCore import QUrl
# # from PyQt5.QtWidgets import QApplication
# # from PyQt5.QtWebEngineWidgets import QWebEnginePage
#
# from PyQt4.QtCore import QUrl
# from PyQt4.QtGui import QApplication
# from PyQt4.QtWebKit import QWebPage as QWebEnginePage
#
# class ExtractorHtml:
# def __init__(self, url):
# self.html = None
#
# _app = QApplication([])
# self._page = QWebEnginePage()
# self._page.mainFrame().load(QUrl(url))
# # self._page.load(QUrl(url))
# self._page.loadFinished.connect(self._load_finished_handler)
#
# #
# #
# while self.html is None:
# _app.processEvents()
#
# _app.quit()
#
# self._page = None
#
# def _callable(self, data):
# if check_content_func:
# if check_content_func(data):
# self.html = data
#
# else:
# self.html = data
#
# def _load_finished_handler(self):
# # self._page.toHtml(self._callable)
# self.html = self._page.mainFrame().toHtml()
#
# return ExtractorHtml(url).html
#
#
# class UpdateDateTextNotFound(Exception):
# pass
#
#
# import os
#
#
# def download_price():
# url = 'http://www.dns-shop.ru/'
#
# html = get_html(url, lambda html: 'price-list-downloader' in html)
#
# from bs4 import BeautifulSoup
# root = BeautifulSoup(html, 'lxml')
#
# for a in root.select('#price-list-downloader a'):
# href = a['href']
#
# if href.endswith('.xls'):
# from urllib.parse import urljoin
# file_url = urljoin(url, href)
# # print(file_url)
#
# update_date_text = a.next_sibling.strip()
#
# import re
# match = re.search(r'\d{,2}.\d{,2}.\d{4}', update_date_text)
# if match is None:
# raise UpdateDateTextNotFound()
#
# date_string = match.group()
# # print(date_string)
#
# # from datetime import datetime
# # print(datetime.strptime(date_string, '%d.%m.%Y'))
#
# file_name = os.path.basename(href)
# file_name = date_string + '_' + file_name
#
# if os.path.exists(file_name):
# return file_name
#
# from urllib.request import urlretrieve
# urlretrieve(file_url, file_name)
#
# return file_name
#
# return
#
#
# while True:
# file_name = download_price()
# print(file_name)
#
# import time
# # time.sleep(10 * 60 * 60)
# time.sleep(60)
from PyQt5.QtCore import QUrl, QTimer
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage
# Target site whose (JS-rendered) HTML is fetched via an off-screen browser.
url = 'http://www.dns-shop.ru/'
app = QApplication([])
page = QWebEnginePage()
page.load(QUrl(url))
# NOTE(review): `_callable` is not defined anywhere in the active code (it
# only exists inside the commented-out block above), so this handler will
# raise NameError once the page finishes loading -- the html-receiving
# callback needs to be restored/defined.
page.loadFinished.connect(lambda x=None: page.toHtml(_callable))
# 10
# Re-load the page every 10 hours (10 * 60 * 60 * 1000 ms) so the price
# data is refreshed periodically while the app runs.
timer = QTimer()
timer.setInterval(10 * 60 * 60 * 1000)
timer.timeout.connect(lambda x=None: page.load(QUrl(url)))
timer.start()
# Blocks until the Qt application quits.
app.exec()
6696f698bff747564601f269987739a28d5abfe1 | 12,918 | py | Python | tests/test_adapters.py | Shelestova-Anastasia/cutadapt | 6e239b3b8e20d17fdec041dc1d967ec2a3cfe770 | [
"MIT"
] | null | null | null | tests/test_adapters.py | Shelestova-Anastasia/cutadapt | 6e239b3b8e20d17fdec041dc1d967ec2a3cfe770 | [
"MIT"
] | null | null | null | tests/test_adapters.py | Shelestova-Anastasia/cutadapt | 6e239b3b8e20d17fdec041dc1d967ec2a3cfe770 | [
"MIT"
] | null | null | null | import pytest
from dnaio import Sequence
from cutadapt.adapters import (
RemoveAfterMatch,
RemoveBeforeMatch,
FrontAdapter,
BackAdapter,
PrefixAdapter,
SuffixAdapter,
LinkedAdapter,
MultipleAdapters,
IndexedPrefixAdapters,
IndexedSuffixAdapters,
)
def test_linked_matches_property():
    """Accessing matches property of non-anchored linked adapters"""
    # Issue #265
    linked = LinkedAdapter(
        FrontAdapter("GGG"),
        BackAdapter("TTT"),
        front_required=False,
        back_required=False,
        name="name",
    )
    assert linked.match_to("AAAATTTT").score == 3
66992cf30daf9b3de5a678f20db0b9dc5b3fafdf | 7,561 | py | Python | archABM/event_model.py | vishalbelsare/ArchABM | 4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873 | [
"MIT"
] | 8 | 2021-07-19T11:54:00.000Z | 2022-03-29T01:45:07.000Z | archABM/event_model.py | vishalbelsare/ArchABM | 4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873 | [
"MIT"
] | null | null | null | archABM/event_model.py | vishalbelsare/ArchABM | 4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873 | [
"MIT"
] | 1 | 2021-08-19T23:56:56.000Z | 2021-08-19T23:56:56.000Z | import copy
import random
from .parameters import Parameters
def new(self):
    """Generates a :class:`~archABM.event_model.EventModel` copy, with reset count and noise

    Returns:
        EventModel: cloned instance
    """
    # reset the mutable state before taking the shallow copy so the clone
    # starts fresh as well
    self.count = 0
    self.noise = None
    clone = copy.copy(self)
    return clone
def duration(self, now) -> int:
    """Draw a random event duration for the current time.

    The duration is uniform between ``duration_min`` and ``duration_max``;
    when the event would outlast the (noise-padded) schedule slot that
    contains *now*, it is trimmed to end at that slot's boundary.

    Args:
        now (int): current timestamp in minutes

    Returns:
        int: event duration in minutes
    """
    length = random.randint(self.params.duration_min, self.params.duration_max)
    end_time = now + length
    noise = self.get_noise()  # schedule time tolerance, minutes
    for start, stop in self.params.schedule:
        if start - noise <= now <= stop + noise < end_time:
            # trim so the event ends exactly at the padded slot boundary
            length = stop + noise - now + 1
            break
    return length
def priority(self) -> float:
    """Computes the priority of a certain event.

    Piecewise-linear in the event count e: decays from 1 to alpha while
    e < repeat_min, then from alpha to 0 as e approaches repeat_max.
    Without a repeat_max the priority is drawn uniformly at random;
    at e == repeat_max the priority is 0.

    Returns:
        float: priority value [0-1]
    """
    alpha = 0.5  # TODO: review hardcoded value
    e = self.count
    r_min = self.params.repeat_min
    r_max = self.params.repeat_max
    if r_max is None:
        return random.uniform(0.0, 1.0)
    if e == r_max:
        return 0.0
    if e < r_min:
        return 1 - (1 - alpha) * e / r_min
    if r_min == r_max:
        return alpha
    return alpha * (r_max - e) / (r_max - r_min)
def probability(self, now: int) -> float:
    """Return the probability of triggering this event at *now*.

    Zero when the repeat limit is reached or *now* falls outside every
    (noise-padded) schedule slot; otherwise the event's priority.

    Args:
        now (int): current timestamp in minutes

    Returns:
        float: event probability [0-1]
    """
    if self.count == self.params.repeat_max:
        return 0.0
    noise = self.get_noise()  # schedule time tolerance, minutes
    for start, stop in self.params.schedule:
        if start - noise <= now <= stop + noise:
            return self.priority()
    return 0.0
def valid(self) -> bool:
    """Return True while the event may still be repeated.

    True when ``repeat_max`` is undefined or the current count is still
    below it; False once the limit is reached.

    Returns:
        bool: valid event
    """
    limit = self.params.repeat_max
    return limit is None or self.count < limit
def consume(self) -> None:
    """Increments one unit the event count"""
    self.count = self.count + 1
    # logging.info("Event %s repeated %d out of %d" % (self.name, self.count, self.target))
def supply(self) -> None:
    """Decrements one unit the event count"""
    self.count = self.count - 1
| 33.455752 | 112 | 0.547943 |
669c0767b2a56157d94adbe410e078a0a3045bd9 | 13,297 | py | Python | tests/test_photokit.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | 656 | 2019-08-14T14:10:44.000Z | 2022-03-28T15:25:42.000Z | tests/test_photokit.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | 557 | 2019-10-14T19:00:02.000Z | 2022-03-28T00:48:30.000Z | tests/test_photokit.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | 58 | 2019-12-27T01:39:33.000Z | 2022-02-26T22:18:49.000Z | """ test photokit.py methods """
import os
import pathlib
import tempfile
import pytest
from osxphotos.photokit import (
LivePhotoAsset,
PhotoAsset,
PhotoLibrary,
VideoAsset,
PHOTOS_VERSION_CURRENT,
PHOTOS_VERSION_ORIGINAL,
PHOTOS_VERSION_UNADJUSTED,
)
# These tests require the author's personal Photos library; skip the whole
# module unless OSXPHOTOS_TEST_EXPORT is set in the environment.
skip_test = "OSXPHOTOS_TEST_EXPORT" not in os.environ
pytestmark = pytest.mark.skipif(
    skip_test, reason="Skip if not running with author's personal library."
)
# Fixture data: per-asset-kind UUIDs and expected attributes from the
# author's library. Keys used by the tests: uuid, filename, and optionally
# raw_filename, adjusted/unadjusted_size, uti/uti_raw, filename_video,
# burst_selected/burst_all.
UUID_DICT = {
    "plain_photo": {
        "uuid": "C6C712C5-9316-408D-A3C3-125661422DA9",
        "filename": "IMG_8844.JPG",
    },
    "hdr": {"uuid": "DD641004-4E37-4233-AF31-CAA0896490B2", "filename": "IMG_6162.JPG"},
    "selfie": {
        "uuid": "C925CFDC-FF2B-4E71-AC9D-C669B6453A8B",
        "filename": "IMG_1929.JPG",
    },
    "video": {
        "uuid": "F4430659-7B17-487E-8029-8C1ABEBE23DF",
        "filename": "IMG_9411.TRIM.MOV",
    },
    "hasadjustments": {
        "uuid": "2F252D2C-C9DE-4BE1-8610-9F968C634D3D",
        "filename": "IMG_2860.JPG",
        "adjusted_size": 3012634,
        "unadjusted_size": 2580058,
    },
    "slow_mo": {
        "uuid": "160447F8-4EB0-4FAE-A26A-3D32EA698F75",
        "filename": "IMG_4055.MOV",
    },
    "live_photo": {
        "uuid": "8EC216A2-0032-4934-BD3F-04C6259B3304",
        "filename": "IMG_3259.HEIC",
        "filename_video": "IMG_3259.mov",
    },
    "burst": {
        "uuid": "CDE4E5D9-1428-41E6-8569-EC0C45FD8E5A",
        "filename": "IMG_8196.JPG",
        "burst_selected": 4,
        "burst_all": 5,
    },
    "raw+jpeg": {
        "uuid": "E3DD04AF-CB65-4D9B-BB79-FF4C955533DB",
        "filename": "IMG_1994.JPG",
        "raw_filename": "IMG_1994.CR2",
        "unadjusted_size": 16128420,
        "uti_raw": "com.canon.cr2-raw-image",
        "uti": "public.jpeg",
    },
}
def _fetch(key):
    """Return the asset for UUID_DICT[key] from the default library."""
    return PhotoLibrary().fetch_uuid(UUID_DICT[key]["uuid"])


def test_fetch_uuid():
    """test fetch_uuid"""
    photo = _fetch("plain_photo")
    assert isinstance(photo, PhotoAsset)


def test_plain_photo():
    """test plain_photo"""
    photo = _fetch("plain_photo")
    assert photo.original_filename == UUID_DICT["plain_photo"]["filename"]
    assert photo.raw_filename is None
    assert photo.isphoto
    assert not photo.ismovie


def test_raw_plus_jpeg():
    """test RAW+JPEG"""
    photo = _fetch("raw+jpeg")
    assert photo.original_filename == UUID_DICT["raw+jpeg"]["filename"]
    assert photo.raw_filename == UUID_DICT["raw+jpeg"]["raw_filename"]
    assert photo.uti_raw() == UUID_DICT["raw+jpeg"]["uti_raw"]
    assert photo.uti() == UUID_DICT["raw+jpeg"]["uti"]


def test_hdr():
    """test hdr"""
    photo = _fetch("hdr")
    assert photo.original_filename == UUID_DICT["hdr"]["filename"]
    assert photo.hdr


def test_burst():
    """test burst and burstid"""
    photo = _fetch("burst")
    assert photo.original_filename == UUID_DICT["burst"]["filename"]
    assert photo.burst
    assert photo.burstid


# def test_selfie():
#     """ test selfie """
#     uuid = UUID_DICT["selfie"]["uuid"]
#     filename = UUID_DICT["selfie"]["filename"]
#     lib = PhotoLibrary()
#     photo = lib.fetch_uuid(uuid)
#     assert photo.original_filename == filename
#     assert photo.selfie


def test_video():
    """test ismovie"""
    photo = _fetch("video")
    assert isinstance(photo, VideoAsset)
    assert photo.original_filename == UUID_DICT["video"]["filename"]
    assert photo.ismovie
    assert not photo.isphoto


def test_slow_mo():
    """test slow_mo"""
    photo = _fetch("slow_mo")
    assert isinstance(photo, VideoAsset)
    assert photo.original_filename == UUID_DICT["slow_mo"]["filename"]
    assert photo.ismovie
    assert photo.slow_mo
    assert not photo.isphoto
### PhotoAsset
def test_export_photo_original():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
def test_export_photo_unadjusted():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
def test_export_photo_current():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["adjusted_size"]
def test_export_photo_raw():
"""test PhotoAsset.export for raw component"""
test_dict = UUID_DICT["raw+jpeg"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, raw=True)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["raw_filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
### VideoAsset
def test_export_video_original():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_video_unadjusted():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_video_current():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
### Slow-Mo VideoAsset
def test_export_slow_mo_original():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_slow_mo_unadjusted():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_slow_mo_current():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
### LivePhotoAsset
def test_export_live_original():
    """Export the original version of a LivePhotoAsset and verify each exported file."""
    record = UUID_DICT["live_photo"]
    asset = PhotoLibrary().fetch_uuid(record["uuid"])
    expected_stem = pathlib.Path(record["filename"]).stem
    with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
        # a live photo exports as multiple files (still image + video)
        for exported in asset.export(tempdir, version=PHOTOS_VERSION_ORIGINAL):
            exported_path = pathlib.Path(exported)
            assert exported_path.is_file()
            assert exported_path.stem == expected_stem
def test_export_live_unadjusted():
    """Export the unadjusted version of a LivePhotoAsset and verify each exported file."""
    record = UUID_DICT["live_photo"]
    asset = PhotoLibrary().fetch_uuid(record["uuid"])
    expected_stem = pathlib.Path(record["filename"]).stem
    with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
        for exported in asset.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED):
            exported_path = pathlib.Path(exported)
            assert exported_path.is_file()
            assert exported_path.stem == expected_stem
def test_export_live_current():
    """Export the current version of a LivePhotoAsset and verify each exported file."""
    record = UUID_DICT["live_photo"]
    asset = PhotoLibrary().fetch_uuid(record["uuid"])
    expected_stem = pathlib.Path(record["filename"]).stem
    with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
        for exported in asset.export(tempdir, version=PHOTOS_VERSION_CURRENT):
            exported_path = pathlib.Path(exported)
            assert exported_path.is_file()
            assert exported_path.stem == expected_stem
def test_export_live_current_just_photo():
    """Export only the still image component of a LivePhotoAsset."""
    record = UUID_DICT["live_photo"]
    asset = PhotoLibrary().fetch_uuid(record["uuid"])
    with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
        exported = asset.export(tempdir, photo=True, video=False)
        # exactly one file: the HEIC still image
        assert len(exported) == 1
        assert exported[0].lower().endswith(".heic")
def test_export_live_current_just_video():
    """Export only the video component of a LivePhotoAsset."""
    record = UUID_DICT["live_photo"]
    asset = PhotoLibrary().fetch_uuid(record["uuid"])
    with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
        exported = asset.export(tempdir, photo=False, video=True)
        # exactly one file: the MOV video
        assert len(exported) == 1
        assert exported[0].lower().endswith(".mov")
def test_fetch_burst_uuid():
    """Fetch burst members by burst UUID, both selected-only and all.

    Verifies the member counts against the expected values recorded in
    UUID_DICT and that the returned objects are PhotoAsset instances.
    """
    test_dict = UUID_DICT["burst"]
    uuid = test_dict["uuid"]
    # (removed unused local: test_dict["filename"] was read but never used)
    lib = PhotoLibrary()
    photo = lib.fetch_uuid(uuid)
    # default: only the "selected" members of the burst
    bursts_selected = lib.fetch_burst_uuid(photo.burstid)
    assert len(bursts_selected) == test_dict["burst_selected"]
    assert isinstance(bursts_selected[0], PhotoAsset)
    # all=True: every member of the burst
    bursts_all = lib.fetch_burst_uuid(photo.burstid, all=True)
    assert len(bursts_all) == test_dict["burst_all"]
    assert isinstance(bursts_all[0], PhotoAsset)
| 31.360849 | 88 | 0.670828 |
669c4ded1d39066ae7e38bea807e79c4ad3272ab | 2,764 | py | Python | parse_json_script/lib_parse_json.py | amane-uehara/fitbit-fetcher | 2a949016933dbcac5f949c8b552c7998b2aadd8c | [
"MIT"
] | null | null | null | parse_json_script/lib_parse_json.py | amane-uehara/fitbit-fetcher | 2a949016933dbcac5f949c8b552c7998b2aadd8c | [
"MIT"
] | null | null | null | parse_json_script/lib_parse_json.py | amane-uehara/fitbit-fetcher | 2a949016933dbcac5f949c8b552c7998b2aadd8c | [
"MIT"
] | null | null | null | import os
import sys
import json
| 23.827586 | 102 | 0.599132 |
669d3d5f4966f2fc9848beb0d7bd023a928904e0 | 4,251 | py | Python | utils/tfds_preprocess.py | chansoopark98/tf_keras-Unknown-grasping | be0f68280ba0b293940a08732fd4a31e89a272cd | [
"MIT"
] | null | null | null | utils/tfds_preprocess.py | chansoopark98/tf_keras-Unknown-grasping | be0f68280ba0b293940a08732fd4a31e89a272cd | [
"MIT"
] | null | null | null | utils/tfds_preprocess.py | chansoopark98/tf_keras-Unknown-grasping | be0f68280ba0b293940a08732fd4a31e89a272cd | [
"MIT"
] | null | null | null | import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import random
from utils.dataset_processing import grasp, image
import matplotlib.pyplot as plt
dataset_path = './tfds/'
train_data, meta = tfds.load('Jacquard', split='train', with_info=True, shuffle_files=False)
BATCH_SIZE = 1
number_train = meta.splits['train'].num_examples
output_size = 300
train_data = train_data.map(preprocess)
# train_data = train_data.map(augment)
train_data = train_data.map(lambda tfds_rgb, tfds_depth, tfds_box: tf.py_function(augment, [tfds_rgb, tfds_depth, tfds_box], [tf.float64]))
rows=1
cols=4
train_data = train_data.take(100)
for input, output in train_data:
# pos_img = label[0]
# cos = label[1]
# sin = label[2]
# width_img = label[3]
fig = plt.figure()
ax0 = fig.add_subplot(rows, cols, 1)
ax0.imshow(output[0][:, :, 0])
ax0.set_title('pos_img')
ax0.axis("off")
ax1 = fig.add_subplot(rows, cols, 2)
ax1.imshow(output[0][:, :, 1])
ax1.set_title('cos')
ax1.axis("off")
ax1 = fig.add_subplot(rows, cols, 3)
ax1.imshow(output[0][:, :, 2])
ax1.set_title('sin')
ax1.axis("off")
ax1 = fig.add_subplot(rows, cols, 4)
ax1.imshow(output[0][:, :, 3])
ax1.set_title('width')
ax1.axis("off")
ax2 = fig.add_subplot(rows, cols, 5)
ax2.imshow(input[0][:, :, :3])
ax2.set_title('sin')
ax2.axis("off")
ax3 = fig.add_subplot(rows, cols, 6)
ax3.imshow(input[0][:, :, 3:])
ax3.set_title('width_img')
ax3.axis("off")
# q_img, ang_img, width_img = post_processing(q_img=pos_img,
# cos_img=cos,
# sin_img=sin,
# width_img=width_img)
# ax3 = fig.add_subplot(rows, cols, 9)
# ax3.imshow(q_img)
# ax3.set_title('q_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 10)
# ax3.imshow(ang_img)
# ax3.set_title('ang_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 11)
# ax3.imshow(width_img)
# ax3.set_title('width_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 12)
# ax3.imshow(inpaint_depth)
# ax3.set_title('from_pcd_inpaint')
# ax3.axis("off")
# s = evaluation.calculate_iou_match(grasp_q = q_img,
# grasp_angle = ang_img,
# ground_truth_bbs = gtbbs,
# no_grasps = 3,
# grasp_width = width_img,
# threshold=0.25)
# print('iou results', s)
plt.show()
| 26.735849 | 139 | 0.604799 |
669f60ed987d448932641383a9784e17ffb52883 | 836 | py | Python | tests/scheduler_test.py | peng4217/scylla | aa5133d7c6d565c95651fc75b26ad605da0982cd | [
"Apache-2.0"
] | 3,556 | 2018-04-28T22:59:40.000Z | 2022-03-28T22:20:07.000Z | tests/scheduler_test.py | peng4217/scylla | aa5133d7c6d565c95651fc75b26ad605da0982cd | [
"Apache-2.0"
] | 120 | 2018-05-20T11:49:00.000Z | 2022-03-07T00:08:55.000Z | tests/scheduler_test.py | peng4217/scylla | aa5133d7c6d565c95651fc75b26ad605da0982cd | [
"Apache-2.0"
] | 518 | 2018-05-27T01:42:25.000Z | 2022-03-25T12:38:32.000Z | import pytest
from scylla.scheduler import Scheduler, cron_schedule
| 23.885714 | 78 | 0.744019 |
669ffe2b5e6215275de00b66a4a28e352cc9a091 | 2,063 | py | Python | ch16_ex.py | DexHunter/Think-Python-book-exercise-solutions | d0abae261eda1dca99043e17e8a1e614caad2140 | [
"CC-BY-4.0"
] | 24 | 2019-05-07T15:11:28.000Z | 2022-03-02T04:50:28.000Z | ch16_ex.py | Dekzu/Think-Python-book-exercise-solutions | d0abae261eda1dca99043e17e8a1e614caad2140 | [
"CC-BY-4.0"
] | null | null | null | ch16_ex.py | Dekzu/Think-Python-book-exercise-solutions | d0abae261eda1dca99043e17e8a1e614caad2140 | [
"CC-BY-4.0"
] | 19 | 2019-08-05T20:59:04.000Z | 2022-03-07T05:13:32.000Z |
def mul_time(t, n):
    '''Multiply Time t by the integer factor n.

    Returns a new Time equal to n copies of t laid end to end.
    '''
    total_seconds = time_to_int(t) * n
    return int_to_time(total_seconds)
def increment(t, sec):
    '''Add sec seconds to Time t in place, without any loops, and return t.

    Uses divmod to carry overflow from seconds into minutes and from minutes
    into hours (the hour field may grow past 23; no day wrap is applied).

    Fixes over the previous version: `div` was an undefined name (should be
    the builtin divmod) and `t.seconds` misspelled the `second` attribute.

    t: Time-like object with hour, minute and second attributes
    sec: int, seconds to add
    Returns the (mutated) object t.
    '''
    t.second += sec
    inc_min, t.second = divmod(t.second, 60)
    t.minute += inc_min
    inc_hour, t.minute = divmod(t.minute, 60)
    t.hour += inc_hour
    return t
def int_to_time(seconds):
    """Build a new Time object from a count of seconds since midnight.

    seconds: int seconds since midnight.
    """
    result = Time()
    hours_part, leftover = divmod(seconds, 3600)
    result.hour = hours_part
    result.minute, result.second = divmod(leftover, 60)
    return result
def time_to_int(time):
    """Return the number of seconds since midnight for a Time object.

    time: Time-like object with hour, minute and second attributes.
    """
    return (time.hour * 60 + time.minute) * 60 + time.second
if __name__ == '__main__':
    # Demo driver.  NOTE(review): Time, print_time, add_time and is_after are
    # assumed to be defined elsewhere in this module -- confirm before running.
    # Build a Time of 17:43:06 and print it multiplied by 3.
    t = Time()
    t.hour = 17
    t.minute = 43
    t.second = 6
    print_time(mul_time(t, 3))
    # A second Time slightly later than t (17:44:05), used for is_after below.
    t2 = Time()
    t2.hour = 17
    t2.minute = 44
    t2.second = 5
    print_time(t)
    # Add a 1:35:00 duration to a 9:45:00 start and print the result.
    start = Time()
    start.hour = 9
    start.minute =45
    start.second = 0
    duration = Time()
    duration.hour = 1
    duration.minute = 35
    duration.second = 0
    done = add_time(start, duration)
    print_time(done)
    # Presumably prints whether t is strictly after t2 -- confirm is_after's contract.
    print( is_after(t, t2) )
66a0075c55665ddddee62ce3c5592465d9e8004b | 200 | py | Python | knowit/providers/__init__.py | labrys/knowit | eea9ac18e38c930230cf81b5dca4a9af9fb10d4e | [
"MIT"
] | null | null | null | knowit/providers/__init__.py | labrys/knowit | eea9ac18e38c930230cf81b5dca4a9af9fb10d4e | [
"MIT"
] | null | null | null | knowit/providers/__init__.py | labrys/knowit | eea9ac18e38c930230cf81b5dca4a9af9fb10d4e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Provider package."""
from __future__ import unicode_literals
from .enzyme import EnzymeProvider
from .ffmpeg import FFmpegProvider
from .mediainfo import MediaInfoProvider
| 25 | 40 | 0.785 |
66a03a53035c1596664c882408ebdf47aa3afc54 | 304 | py | Python | python-mundo3/ex077.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | 1 | 2021-08-15T18:18:43.000Z | 2021-08-15T18:18:43.000Z | python-mundo3/ex077.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | python-mundo3/ex077.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | list = ('APRENDER', 'PROGRAMAR', 'LINGUAGEM', 'PYTHON', 'CURSO', 'GRATIS', 'ESTUDAR',
'PRATICAR', 'TRABALHAR', 'MERCADO', 'PROGRAMADOR', 'FUTURO')
for p in list:
print(f'\nNa palavra {p} temos: ', end='')
for l in p:
if l.lower() in 'aeiou':
print(l.lower(), end=' ')
| 38 | 85 | 0.546053 |
66a1405cb275e20463fb6f972194333959f1c8d7 | 1,449 | py | Python | src/DataParser/odmdata/variable.py | UCHIC/iUTAHData | 4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab | [
"Unlicense"
] | 2 | 2015-02-25T01:12:51.000Z | 2017-02-08T22:54:41.000Z | src/DataParser/odmdata/variable.py | UCHIC/iUTAHData | 4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab | [
"Unlicense"
] | 48 | 2015-01-12T18:01:56.000Z | 2021-06-10T20:05:26.000Z | src/DataParser/odmdata/variable.py | UCHIC/iUTAHData | 4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab | [
"Unlicense"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.orm import relationship
from base import Base
from unit import Unit
| 45.28125 | 102 | 0.718427 |
66a4535ff16536c58c62bd0252d04c6087d6613d | 7,751 | py | Python | pandas/pandastypes.py | pyxll/pyxll-examples | e8a1cba1ffdb346191f0c80bea6877cbe0291957 | [
"Unlicense"
] | 93 | 2015-04-27T14:44:02.000Z | 2022-03-03T13:14:49.000Z | pandas/pandastypes.py | samuelpedrini/pyxll-examples | ce7f839b4ff4f4032b78dffff2357f3feaadc3a1 | [
"Unlicense"
] | 4 | 2019-12-13T11:32:17.000Z | 2022-03-03T14:07:02.000Z | pandas/pandastypes.py | samuelpedrini/pyxll-examples | ce7f839b4ff4f4032b78dffff2357f3feaadc3a1 | [
"Unlicense"
] | 53 | 2015-04-27T14:44:14.000Z | 2022-01-23T05:26:52.000Z | """
Custom excel types for pandas objects (eg dataframes).
For information about custom types in PyXLL see:
https://www.pyxll.com/docs/udfs.html#custom-types
For information about pandas see:
http://pandas.pydata.org/
Including this module in your pyxll config adds the following custom types that can
be used as return and argument types to your pyxll functions:
- dataframe
- series
- series_t
Dataframes with multi-index indexes or columns will be returned with the columns and
index values in the resulting array. For normal indexes, the index will only be
returned as part of the resulting array if the index is named.
eg::
from pyxll import xl_func
import pandas as pa
@xl_func("int rows, int cols, float value: dataframe")
def make_empty_dataframe(rows, cols, value):
# create an empty dataframe
df = pa.DataFrame({chr(c + ord('A')) : value for c in range(cols)}, index=range(rows))
# return it. The custom type will convert this to a 2d array that
# excel will understand when this function is called as an array
# function.
return df
@xl_func("dataframe df, string col: float")
def sum_column(df, col):
return df[col].sum()
In excel (use Ctrl+Shift+Enter to enter an array formula)::
=make_empty_dataframe(3, 3, 100)
>> A B C
>> 100 100 100
>> 100 100 100
>> 100 100 100
=sum_column(A1:C4, "A")
>> 300
"""
from pyxll import xl_return_type, xl_arg_type
import datetime as dt
import pandas as pa
import numpy as np
import pytz
try:
import pywintypes
except ImportError:
pywintypes = None
def _normalize_dates(data):
    """
    Apply normalize_date to every cell of a 2d (rows of columns) structure.

    pythoncom will fail to convert datetimes to Windows dates without tzinfo,
    so date cells are normalized to standard timezone-aware datetimes.  This
    matters when handing values to an Excel Range via COM; it is harmless (but
    unnecessary) when objects only pass through PyXLL functions.
    """
    return [list(map(normalize_date, row)) for row in data]
def _fix_pywintypes(data):
    """
    Convert any pywintypes.TimeType cells in a 2d structure to datetimes.

    pandas can crash when handed pywintypes.TimeType values (e.g. when an
    Excel Range is converted to a pandas object), so each cell is passed
    through fix_pywintypes first.  When pywintypes is not available the
    data is returned unchanged.
    """
    if pywintypes is None:
        return data
    return [list(map(fix_pywintypes, row)) for row in data]
| 31.897119 | 95 | 0.632047 |
66a463bd296e2375b0d9a6abd3ff5e747d929dcd | 10,912 | py | Python | liveDataApp/views.py | subahanii/COVID19-tracker | b7d30ff996974755e78393f0777d6cf623c4d654 | [
"MIT"
] | 7 | 2020-04-28T12:34:42.000Z | 2021-05-17T06:20:51.000Z | liveDataApp/views.py | subahanii/COVID19-tracker | b7d30ff996974755e78393f0777d6cf623c4d654 | [
"MIT"
] | 1 | 2020-07-09T18:17:32.000Z | 2020-07-10T13:56:01.000Z | liveDataApp/views.py | subahanii/COVID19-tracker | b7d30ff996974755e78393f0777d6cf623c4d654 | [
"MIT"
] | null | null | null | from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
import re
from collections import defaultdict as dfd
from .models import *
from datetime import date
from datetime import timedelta
from django.db.models import Sum
from django.db.models import Count
from django.db.models.functions import ExtractDay,ExtractMonth,ExtractYear
# Date snapshots taken once at import time.
# NOTE(review): module-level dates go stale in a long-running server process --
# confirm the module is (re)imported often enough for the dashboard's needs.
today = date.today()
yesterday = today - timedelta(days = 1)
# Rank -> hex colour shade: rank 1 gets pure red (#FF0000) and later ranks get
# progressively lighter reds.
# NOTE(review): ranks 26-29 (#FFECEC) are LIGHTER than ranks 30-35 (#FFE0E0),
# breaking the dark-to-light progression -- confirm whether that is intended.
colorList = {
    1:"#FF0000",
    2:"#FF4040",
    3:"#FF4040",
    4:"#FF4040",
    5:"#FF7474",
    6:"#FF7474",
    7:"#FF7474",
    8:"#FF7474",
    9:"#FF7474",
    10:"#FF7474",
    11:"#FF7474",
    12:"#FF7474",
    13:"#FF8787",
    14:"#FF8787",
    15:"#FF8787",
    16:"#FF8787",
    17:"#FF8787",
    18:"#FF8787",
    19:"#FF8787",
    20:"#FFB3B3",
    21:"#FFB3B3",
    22:"#FFB3B3",
    23:"#FFB3B3",
    24:"#FFB3B3",
    25:"#FFB3B3",
    26:"#FFECEC",
    27:"#FFECEC",
    28:"#FFECEC",
    29:"#FFECEC",
    30:"#FFE0E0",
    31:"#FFE0E0",
    32:"#FFE0E0",
    33:"#FFE0E0",
    34:"#FFE0E0",
    35:"#FFE0E0",
}
# Full Indian state/UT name -> two-letter code used by the rest of the app.
# NOTE(review): 'Chandigarh' -> "CT" and 'Chhattisgarh' -> "CH" look swapped
# relative to both ISO 3166-2:IN (Chandigarh=CH, Chhattisgarh=CG) and the
# common covid19india convention (Chandigarh=CH, Chhattisgarh=CT).  Confirm
# against whatever consumes these codes before changing them.
# NOTE(review): 'Telengana' and 'Dadara & Nagar Havelli' are unusual spellings;
# they may intentionally match the scraped data source -- verify.
stateCode = {
    'Andaman and Nicobar Islands': "AN" ,
    'Andhra Pradesh': "AP",
    'Arunachal Pradesh': "AR",
    'Assam': "AS" ,
    'Bihar':"BR" ,
    'Chandigarh':"CT" ,
    'Chhattisgarh': "CH",
    'Delhi':"DL" ,
    'Dadara & Nagar Havelli': "DN",
    'Goa':"GA" ,
    'Gujarat': "GJ",
    'Haryana': "HR",
    'Himachal Pradesh': "HP",
    'Jammu and Kashmir': "JK" ,
    'Jharkhand': "JH",
    'Karnataka': "KA",
    'Kerala': "KL",
    'Ladakh': "LK",
    'Lakshadweep': "LD",
    'Madhya Pradesh': "MP",
    'Maharashtra':"MH" ,
    'Manipur':"MN" ,
    'Meghalaya': "ML",
    'Mizoram': "MZ",
    'Nagaland': "NL",
    'Odisha': "OD",
    'Puducherry': "PY",
    'Punjab': "PB",
    'Rajasthan': "RJ",
    'Sikkim': "SK",
    'Tamil Nadu':"TN" ,
    'Telengana': "TS",
    'Tripura':"TR" ,
    'Uttarakhand': "UK",
    'Uttar Pradesh':"UP" ,
    'West Bengal':"WB"
}
# Create your views here.
| 26.421308 | 129 | 0.640121 |
66a65924a1e2768d7469c1f8356205da9b3cbe9a | 89 | py | Python | project/healthcheck.py | permallotment/allotment3 | 0eb390086cc8f48ba6817541c6c70c06dfc83058 | [
"CC0-1.0"
] | null | null | null | project/healthcheck.py | permallotment/allotment3 | 0eb390086cc8f48ba6817541c6c70c06dfc83058 | [
"CC0-1.0"
] | null | null | null | project/healthcheck.py | permallotment/allotment3 | 0eb390086cc8f48ba6817541c6c70c06dfc83058 | [
"CC0-1.0"
] | null | null | null | from django.http import HttpResponse
| 17.8 | 36 | 0.764045 |
66a6d482011b0d35775a7523319647c543ff9fb5 | 11,829 | py | Python | src/algo/baselines/randomP/randomP.py | Lukeeeeee/CE7490-Group-Project-Python | 840a655bcb8cebbe3d39e5d3f3d68a01936a6283 | [
"MIT"
] | null | null | null | src/algo/baselines/randomP/randomP.py | Lukeeeeee/CE7490-Group-Project-Python | 840a655bcb8cebbe3d39e5d3f3d68a01936a6283 | [
"MIT"
] | null | null | null | src/algo/baselines/randomP/randomP.py | Lukeeeeee/CE7490-Group-Project-Python | 840a655bcb8cebbe3d39e5d3f3d68a01936a6283 | [
"MIT"
] | 1 | 2020-10-20T07:06:18.000Z | 2020-10-20T07:06:18.000Z | from src.core import Basic
import networkx as nx
| 37.792332 | 95 | 0.481951 |
66a846d0d120e378d227803f5adec0334b4d67ff | 1,336 | py | Python | stations/heathen/migrations/0003_auto_20161128_0519.py | boyombo/django-stations | 93a70be7eb8268f9d48f6e3cf9a532bcb27ff895 | [
"MIT"
] | null | null | null | stations/heathen/migrations/0003_auto_20161128_0519.py | boyombo/django-stations | 93a70be7eb8268f9d48f6e3cf9a532bcb27ff895 | [
"MIT"
] | null | null | null | stations/heathen/migrations/0003_auto_20161128_0519.py | boyombo/django-stations | 93a70be7eb8268f9d48f6e3cf9a532bcb27ff895 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-28 05:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 30.363636 | 115 | 0.577844 |
66aa16869b2a00e5d9cde4a253891d698c5527b2 | 2,437 | py | Python | src/observers/simple_observer.py | ChenyangTang/bark-ml | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | [
"MIT"
] | null | null | null | src/observers/simple_observer.py | ChenyangTang/bark-ml | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | [
"MIT"
] | null | null | null | src/observers/simple_observer.py | ChenyangTang/bark-ml | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | [
"MIT"
] | null | null | null |
from gym import spaces
import numpy as np
from bark.models.dynamic import StateDefinition
from modules.runtime.commons.parameters import ParameterServer
import math
import operator
from src.commons.spaces import BoundedContinuous, Discrete
from src.observers.observer import StateObserver
| 30.848101 | 77 | 0.672959 |
66aa1e9b55b1f6a0fc3a8c730d67ac565985ed59 | 9,610 | py | Python | cosilico/base/scatter.py | cosilico/cosilico | 983373139aeaf459271c559a47a6439939ec93a5 | [
"MIT"
] | null | null | null | cosilico/base/scatter.py | cosilico/cosilico | 983373139aeaf459271c559a47a6439939ec93a5 | [
"MIT"
] | null | null | null | cosilico/base/scatter.py | cosilico/cosilico | 983373139aeaf459271c559a47a6439939ec93a5 | [
"MIT"
] | null | null | null | import altair as alt
import pandas as pd
def scatterplot(x, y, data, hue=None, color=None, opacity=1.,
                x_autoscale=True, y_autoscale=True):
    """Display a basic scatterplot.

    Parameters
    ----------
    x : str
        Column in data to be used for x-axis
    y : str
        Column in data to be used for y-axis
    data : pandas.DataFrame
        Dataframe holding x and y
    hue : str, None
        Column in data used to color the points
    color : str, None
        What color to display the points as.
        If hue is not None, hue overrides color.
    opacity : float
        Opacity of the points in the plot
    x_autoscale : bool
        Scale the x-axis to fit the data, otherwise the axis starts at zero
    y_autoscale : bool
        Scale the y-axis to fit the data, otherwise the axis starts at zero

    Returns
    -------
    altair.Chart
    """
    # mark-level settings: opacity always, fixed color only when hue is absent
    point_kwargs = {'opacity': opacity}
    if hue is None and color is not None:
        point_kwargs['color'] = color

    # encoding-level settings: categorical color channel when hue is given
    channel_kwargs = {}
    if hue is not None:
        channel_kwargs['color'] = f'{hue}:N'

    return alt.Chart(data).mark_point(**point_kwargs).encode(
        x=alt.X(f'{x}:Q', scale=alt.Scale(zero=not x_autoscale)),
        y=alt.Y(f'{y}:Q', scale=alt.Scale(zero=not y_autoscale)),
        **channel_kwargs
    )
def jointplot(x, y, data, hue=None, color=None, show_x=True,
              show_y=True, opacity=.6, padding_scalar=.05, maxbins=30,
              hist_height=50):
    """Display a scatterplot with histograms along the axes.

    Parameters
    ----------
    x : str
        Column in data to be used for x-axis
    y : str
        Column in data to be used for y-axis
    data : pandas.DataFrame
        Dataframe holding x and y
    hue : str, None
        Column in data used to color the points
    color : str, None
        What color to display the points as.
        If hue is not None, color is ignored.
    show_x : bool
        Show the distribution for the x-axis values
    show_y : bool
        Show the distribution for the y-axis values
    opacity : float
        Opacity of the histograms in the plot
    padding_scalar : float
        Fraction of each column's value range added as padding on both
        ends of the corresponding axis domain
    maxbins : int
        Max bins for the histograms
    hist_height : int
        Height of the top histogram / width of the right histogram, in pixels

    Example
    -------
    >>> import seaborn as sns
    >>> iris = sns.load_dataset('iris')
    >>> jointplot('sepal_length', 'sepal_width', iris, hue='species')

    Returns
    -------
    altair.Chart
    """
    chart = alt.Chart(data)

    # Pad the axis domains by a fraction of the data range so points are not
    # drawn on the plot border.
    x_diff = max(data[x]) - min(data[x])
    y_diff = max(data[y]) - min(data[y])
    xscale = alt.Scale(domain=(min(data[x]) - (x_diff * padding_scalar),
                               max(data[x]) + (x_diff * padding_scalar)))
    yscale = alt.Scale(domain=(min(data[y]) - (y_diff * padding_scalar),
                               max(data[y]) + (y_diff * padding_scalar)))

    area_kwargs = {'opacity': opacity, 'interpolate': 'step'}

    # Fix: honor the documented `color` argument (previously accepted but
    # silently ignored).  As in scatterplot, `hue` takes precedence.
    point_kwargs = {}
    if hue is None and color is not None:
        point_kwargs['color'] = color

    mark_kwargs = {}
    if hue is not None:
        mark_kwargs['color'] = f'{hue}:N'

    points = chart.mark_circle(**point_kwargs).encode(
        alt.X(x, scale=xscale),
        alt.Y(y, scale=yscale),
        **mark_kwargs
    )
    encode_kwargs = {}
    if hue is not None:
        encode_kwargs['color'] = f'{hue}:N'
    top_hist = chart.mark_area(**area_kwargs).encode(
        alt.X(f'{x}:Q',
              # when using bins, the axis scale is set through the bin extent,
              # so we do not specify the scale here (it would be ignored)
              bin=alt.Bin(maxbins=maxbins, extent=xscale.domain),
              stack=None,
              title='',
              axis=alt.Axis(labels=False, tickOpacity=0.)
              ),
        alt.Y('count()', stack=None, title=''),
        **encode_kwargs
    ).properties(height=hist_height)
    right_hist = chart.mark_area(**area_kwargs).encode(
        alt.Y(f'{y}:Q',
              bin=alt.Bin(maxbins=maxbins, extent=yscale.domain),
              stack=None,
              title='',
              axis=alt.Axis(labels=False, tickOpacity=0.)
              ),
        alt.X('count()', stack=None, title=''),
        **encode_kwargs
    ).properties(width=hist_height)

    # Compose: the x histogram goes above, the y histogram to the right.
    if show_x and show_y:
        return top_hist & (points | right_hist)
    if show_x and not show_y:
        return top_hist & points
    if not show_x and show_y:
        return points | right_hist
    return points
def clean_jointplot(x, y, data, hue=None, show_x=True,
        show_y=True, opacity=.6, padding_scalar=.2, bandwidth_scalar=10,
        line_height=50, top_spacing=-40, right_spacing=0,
        apply_configure_view=True):
    """Display a clean scatterplot with axes distribution lines.

    Parameters
    ----------
    x : str
        Column in data to be used for x-axis
    y : str
        Column in data to be used for y-axis
    data : pandas.DataFrame
        Dataframe holding x and y
    hue : str, None
        Column in data used to color the points
    show_x : bool
        Show the line distribution for the x-axis values
    show_y : bool
        Show the line distribution for the y-axis values
    opacity : float
        Opacity of the distribution lines in the plot
    padding_scalar : float
        Fraction of each column's value range added as padding on both
        ends of the corresponding axis domain
    bandwidth_scalar : float, int
        Sets bandwidth for the density estimation.
        Bandwidth = value_range / bandwidth_scalar
    line_height : int
        Height of the distribution lines
    top_spacing : int
        Amount of spacing between top distribution line and scatter
    right_spacing : int
        Amount of spacing between right distribution line and scatter
    apply_configure_view : bool
        Whether to apply strokeWidth=0 to the configure view function.
        Note that if this is applied you can't later combine this chart
        with another chart. To combine this chart with another chart,
        set apply_configure_view to False and reapply .configure_view on
        the combined chart to make the axis borders go away.

    Returns
    -------
    altair.Chart
    """
    chart = alt.Chart(data)

    # Pad the axis domains by a fraction of the data range so points are not
    # drawn on the plot border.
    x_diff = max(data[x]) - min(data[x])
    y_diff = max(data[y]) - min(data[y])
    xscale = alt.Scale(domain=(min(data[x]) - (x_diff * padding_scalar),
                               max(data[x]) + (x_diff * padding_scalar)))
    yscale = alt.Scale(domain=(min(data[y]) - (y_diff * padding_scalar),
                               max(data[y]) + (y_diff * padding_scalar)))

    # (removed unused local `area_kwargs`: this function draws lines, not areas)
    mark_kwargs = {}
    if hue is not None:
        mark_kwargs['color'] = f'{hue}:N'
    points = chart.mark_circle().encode(
        alt.X(x, scale=xscale),
        alt.Y(y, scale=yscale),
        **mark_kwargs
    )
    encode_kwargs = {}
    if hue is not None:
        encode_kwargs['color'] = f'{hue}:N'
    transform_kwargs = {}
    if hue is not None:
        # compute one density line per hue category
        transform_kwargs['groupby'] = [hue]
    # hide everything on the density-line axes: no labels, ticks, domain, grid
    line_axis_kwargs = {'labels': False, 'tickOpacity': 0., 'domain': False,
                        'grid': False}
    top_line = chart.transform_density(
        density=x,
        bandwidth=x_diff / bandwidth_scalar,
        counts=True,
        extent=xscale.domain,
        steps=200,
        **transform_kwargs
    ).mark_line(
        opacity=opacity
    ).encode(
        x=alt.X('value:Q',
                scale=xscale,
                title='',
                axis=alt.Axis(**line_axis_kwargs)
                ),
        y=alt.Y('density:Q',
                title='',
                axis=alt.Axis(**line_axis_kwargs)
                ),
        **encode_kwargs
    ).properties(height=line_height)
    right_line = chart.transform_density(
        density=y,
        bandwidth=y_diff / bandwidth_scalar,
        counts=True,
        extent=yscale.domain,
        steps=200,
        **transform_kwargs
    ).mark_line(
        opacity=opacity
    ).encode(
        # NOTE(review): alt.X is passed as the `y=` channel (and alt.Y as
        # `x=`) here, mirroring the original code -- confirm altair re-keys
        # the channel from the keyword as intended.
        y=alt.X('value:Q',
                scale=yscale,
                title='',
                axis=alt.Axis(**line_axis_kwargs)
                ),
        x=alt.Y('density:Q',
                title='',
                axis=alt.Axis(**line_axis_kwargs)
                ),
        order='value:Q',
        **encode_kwargs
    ).properties(width=line_height)

    # Compose: x density above the scatter, y density to its right.
    if show_x and show_y:
        combined = alt.vconcat(top_line,
                               alt.hconcat(points, right_line, spacing=right_spacing),
                               spacing=top_spacing)
    if show_x and not show_y:
        combined = alt.vconcat(top_line, points, spacing=top_spacing)
    if not show_x and show_y:
        combined = alt.hconcat(points, right_line, spacing=right_spacing)
    if not show_x and not show_y:
        combined = points
    if apply_configure_view:
        combined = combined.configure_view(strokeWidth=0)
    return combined
| 29.478528 | 87 | 0.591467 |
66abb66cbd60706f6fbdf7789edf198d10295b85 | 12,103 | py | Python | flappy_env.py | timlaroche/FlapPyBird | cffc7bb76daad67957a8b5778c1f2c7d82da1514 | [
"MIT"
] | null | null | null | flappy_env.py | timlaroche/FlapPyBird | cffc7bb76daad67957a8b5778c1f2c7d82da1514 | [
"MIT"
] | null | null | null | flappy_env.py | timlaroche/FlapPyBird | cffc7bb76daad67957a8b5778c1f2c7d82da1514 | [
"MIT"
] | null | null | null | import gym
from gym import spaces
from itertools import cycle
import random
import sys
import os
import pygame
from pygame.locals import *
import flappy
import numpy as np
import cv2
# GLOBALS
FPS = 30  # target frames per second for the game loop
SCREENWIDTH = 288   # window width in pixels
SCREENHEIGHT = 512  # window height in pixels
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79  # y coordinate of the ground line (79% down the screen)
# sprite frames for the player's flap animation (up / mid / down)
PLAYERS_FILES = ('assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png')
BACKGROUND_FILE= 'assets/sprites/background-day.png'
PIPES_LIST = 'assets/sprites/pipe-green.png'
# caches filled at runtime: loaded surfaces, sounds and collision hitmasks
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# Python 2/3 compatibility shim: alias xrange to range on Python 3
try:
    xrange
except NameError:
    xrange = range
66aded0365be403ed572fa925d74446e3fe43e79 | 4,587 | py | Python | vkmini/group/group_longpoll.py | Elchinchel/vkmini | 378ee3893c5826563a19198fd532df47aaa03350 | [
"MIT"
] | 2 | 2021-08-12T20:22:40.000Z | 2022-02-06T18:13:38.000Z | vkmini/group/group_longpoll.py | Elchinchel/vkmini | 378ee3893c5826563a19198fd532df47aaa03350 | [
"MIT"
] | null | null | null | vkmini/group/group_longpoll.py | Elchinchel/vkmini | 378ee3893c5826563a19198fd532df47aaa03350 | [
"MIT"
] | 3 | 2020-07-31T17:19:20.000Z | 2021-12-11T11:38:23.000Z | from typing import AsyncGenerator, List, Union, Any
from aiohttp.client import ClientSession
from vkmini.utils import AbstractLogger
from vkmini.request import longpoll_get, default_session
from vkmini.exceptions import TokenInvalid
from vkmini import VkApi
| 28.849057 | 79 | 0.58535 |
66af18eea69ccb8397ca09f7ca83656cd98f0584 | 1,162 | py | Python | aswan/tests/unit/test_migrations.py | papsebestyen/aswan | ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd | [
"MIT"
] | 1 | 2021-04-28T23:08:07.000Z | 2021-04-28T23:08:07.000Z | aswan/tests/unit/test_migrations.py | papsebestyen/aswan | ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd | [
"MIT"
] | 1 | 2022-01-22T22:02:55.000Z | 2022-01-22T22:02:55.000Z | aswan/tests/unit/test_migrations.py | papsebestyen/aswan | ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd | [
"MIT"
] | 2 | 2022-01-05T10:01:22.000Z | 2022-02-16T10:58:46.000Z | import tarfile
import pandas as pd
import sqlalchemy as db
from aswan import AswanConfig, ProdConfig, Project
from aswan.migrate import pull, push
from aswan.models import Base
from aswan.object_store import get_object_store
| 24.723404 | 62 | 0.683305 |
66b02efea9465e74c9e2945b8ff0942e0ed6931f | 82 | py | Python | backend/src/apps/test/apps.py | LucienLuc/project-sts | 02ad13b515bcefe1c1ef30f0c06104359bff613e | [
"MIT"
] | null | null | null | backend/src/apps/test/apps.py | LucienLuc/project-sts | 02ad13b515bcefe1c1ef30f0c06104359bff613e | [
"MIT"
] | null | null | null | backend/src/apps/test/apps.py | LucienLuc/project-sts | 02ad13b515bcefe1c1ef30f0c06104359bff613e | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16.4 | 33 | 0.743902 |
66b23735ac5dd60f24c047d430921a774e2c8f6b | 1,055 | py | Python | booking.py | kurkurzz/AdminDashboard-BookingWithTimeslot | aa34fef7bc0e1f8cabb602adc6d69af925436e5d | [
"MIT"
] | null | null | null | booking.py | kurkurzz/AdminDashboard-BookingWithTimeslot | aa34fef7bc0e1f8cabb602adc6d69af925436e5d | [
"MIT"
] | null | null | null | booking.py | kurkurzz/AdminDashboard-BookingWithTimeslot | aa34fef7bc0e1f8cabb602adc6d69af925436e5d | [
"MIT"
] | null | null | null | import datetime as dt | 31.029412 | 118 | 0.559242 |
66b3e370acc80eb4f8fc537add6850404fc19250 | 148 | py | Python | problems/incorrect_division_method.py | stereoabuse/codewars | d6437afaef38c3601903891b8b9cb0f84c108c54 | [
"MIT"
] | null | null | null | problems/incorrect_division_method.py | stereoabuse/codewars | d6437afaef38c3601903891b8b9cb0f84c108c54 | [
"MIT"
] | null | null | null | problems/incorrect_division_method.py | stereoabuse/codewars | d6437afaef38c3601903891b8b9cb0f84c108c54 | [
"MIT"
] | null | null | null | ## Incorrect division method
## 8 kyu
## https://www.codewars.com/kata/54d1c59aba326343c80000e7
| 21.142857 | 59 | 0.682432 |
66b517ab0ecf7dee82c7b5fd1f3ac99536fb011e | 1,927 | py | Python | launch_notebooks.py | srivnamrata/openvino | aea76984a731fa3e81be9633dc8ffc702fb4e207 | [
"Apache-2.0"
] | null | null | null | launch_notebooks.py | srivnamrata/openvino | aea76984a731fa3e81be9633dc8ffc702fb4e207 | [
"Apache-2.0"
] | null | null | null | launch_notebooks.py | srivnamrata/openvino | aea76984a731fa3e81be9633dc8ffc702fb4e207 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import subprocess
import sys
from pathlib import Path
import os
pythonpath = sys.executable
curdir = Path(__file__).parent.resolve()
parentdir = curdir.parent
# If openvino_env is already activated, launch jupyter lab
# This will also start if openvino_env_2 is activated instead of openvino_env
# The assumption is that that is usually intended
if "openvino_env" in pythonpath:
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
else:
if sys.platform == "win32":
scripts_dir = "Scripts"
else:
scripts_dir = "bin"
# If openvino_env is not activated, search for the openvino_env folder in the
# current and parent directory and launch the notebooks
try:
pythonpath = os.path.normpath(
os.path.join(curdir, f"openvino_env/{scripts_dir}/python")
)
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
except:
try:
pythonpath = os.path.normpath(
os.path.join(parentdir, f"openvino_env/{scripts_dir}/python")
)
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
except:
print(pythonpath)
print(
"openvino_env could not be found in the current or parent "
"directory, or the installation is not complete. Please follow "
"the instructions on "
"https://github.com/openvinotoolkit/openvino_notebooks to "
"install the notebook requirements in a virtual environment.\n\n"
"After installation, you can also launch the notebooks by "
"activating the virtual environment manually (see the README "
"on GitHub, linked above) and typing `jupyter lab notebooks`.\n\n"
f"Current directory: {curdir}"
f"Python executable: {sys.executable}"
)
| 39.326531 | 82 | 0.632071 |
66b64a14727f525c1e5bbd7f0c1785592ad8eed7 | 1,143 | py | Python | update_last_date.py | ankschoubey/testblog | f74e93f0f85edaee9c5adbe402e8e4a5252cc64d | [
"Apache-2.0"
] | 1 | 2021-07-26T00:58:53.000Z | 2021-07-26T00:58:53.000Z | update_last_date.py | ankschoubey/testblog | f74e93f0f85edaee9c5adbe402e8e4a5252cc64d | [
"Apache-2.0"
] | 15 | 2020-03-28T05:27:53.000Z | 2022-01-07T17:44:08.000Z | update_last_date.py | ankschoubey/testblog | f74e93f0f85edaee9c5adbe402e8e4a5252cc64d | [
"Apache-2.0"
] | 3 | 2021-05-08T19:59:02.000Z | 2021-05-11T17:14:45.000Z |
import os.path, os, time
from datetime import datetime
from os import listdir
from os.path import isfile, join
path = "_posts"
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
#print(onlyfiles)
for i in onlyfiles:
completePath = f"{path}/{i}"
updatePost(completePath) | 30.078947 | 69 | 0.601925 |
66b70f0759d9cb9c2433981c7b3e962dee37c367 | 4,032 | py | Python | basic/19-brownie/brownie_test/tests/exchange/test_eth_to_token.py | xiangzhengfeng/Dapp-Learning | 813fe6e52898206046842d10ecf9eb68b7f336a1 | [
"MIT"
] | 987 | 2021-12-19T09:57:18.000Z | 2022-03-31T15:39:45.000Z | basic/19-brownie/brownie_test/tests/exchange/test_eth_to_token.py | xiangzhengfeng/Dapp-Learning | 813fe6e52898206046842d10ecf9eb68b7f336a1 | [
"MIT"
] | 30 | 2021-12-20T03:13:29.000Z | 2022-03-31T15:00:23.000Z | basic/19-brownie/brownie_test/tests/exchange/test_eth_to_token.py | xiangzhengfeng/Dapp-Learning | 813fe6e52898206046842d10ecf9eb68b7f336a1 | [
"MIT"
] | 207 | 2021-12-19T08:40:38.000Z | 2022-03-31T13:10:02.000Z | from brownie import (accounts, web3)
| 51.692308 | 117 | 0.705109 |
66b7938b4ce230cf1fa2893cf38e7f737bacfde6 | 49 | py | Python | hello.py | Lifereborn/cs3240-labdemo | 20db420273e78b4a905ec7e3a21fc717d71dc301 | [
"MIT"
] | null | null | null | hello.py | Lifereborn/cs3240-labdemo | 20db420273e78b4a905ec7e3a21fc717d71dc301 | [
"MIT"
] | null | null | null | hello.py | Lifereborn/cs3240-labdemo | 20db420273e78b4a905ec7e3a21fc717d71dc301 | [
"MIT"
] | null | null | null | from helper import greetings
greetings("hi!")
| 8.166667 | 28 | 0.734694 |
66b88bc537b297b0b6ea48d2a39575fd0626f252 | 232 | py | Python | setup.py | h-rub/manzip | 875e4ed75e08bd06b0d50698ecf1744ab3723e4c | [
"MIT"
] | null | null | null | setup.py | h-rub/manzip | 875e4ed75e08bd06b0d50698ecf1744ab3723e4c | [
"MIT"
] | null | null | null | setup.py | h-rub/manzip | 875e4ed75e08bd06b0d50698ecf1744ab3723e4c | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name="manzip",
version='1.0.0',
py_modules=['manzip'],
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
manzip=app:main
''',
) | 16.571429 | 28 | 0.547414 |
66b95f7f1063980cc02f05f543cab0abf0bce28b | 199 | py | Python | tests/test_mmhelloworld.py | manasm11/mmhelloworld | 2e6907ac0962de90764a036d14046861b5f47521 | [
"MIT"
] | null | null | null | tests/test_mmhelloworld.py | manasm11/mmhelloworld | 2e6907ac0962de90764a036d14046861b5f47521 | [
"MIT"
] | null | null | null | tests/test_mmhelloworld.py | manasm11/mmhelloworld | 2e6907ac0962de90764a036d14046861b5f47521 | [
"MIT"
] | null | null | null | from mmhelloworld import say_hello
| 19.9 | 53 | 0.738693 |
66b9ec6f54ec8e5b78556e4fbb86bde48b9e1d35 | 1,167 | py | Python | bann/b_container/functions/print_init_net_state.py | arturOnRails/BANN | 027af04349304941fb73c2ede502aca4b76f1ad1 | [
"MIT"
] | null | null | null | bann/b_container/functions/print_init_net_state.py | arturOnRails/BANN | 027af04349304941fb73c2ede502aca4b76f1ad1 | [
"MIT"
] | null | null | null | bann/b_container/functions/print_init_net_state.py | arturOnRails/BANN | 027af04349304941fb73c2ede502aca4b76f1ad1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
from typing import TypeVar
from bann.b_container.states.general.interface.init_state import InitState
from bann.b_container.states.general.interface.net_state import NetState
from bann.b_container.functions.dict_str_repr import dict_string_repr
from rewowr.public.functions.syncout_dep_functions import logger_print_to_console
from rewowr.public.interfaces.logger_interface import SyncStdoutInterface
_TypeNet = TypeVar('_TypeNet', bound=NetState)
_TypeInit = TypeVar('_TypeInit', bound=InitState)
_TypeState = TypeVar('_TypeState', NetState, InitState)
| 44.884615 | 99 | 0.77892 |
66baa831bc3a0b5f4c002eec9ab7e86c9dd317b9 | 4,578 | py | Python | PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py | Nicolucas/C-Scripts | 2608df5c2e635ad16f422877ff440af69f98f960 | [
"MIT"
] | null | null | null | PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py | Nicolucas/C-Scripts | 2608df5c2e635ad16f422877ff440af69f98f960 | [
"MIT"
] | null | null | null | PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py | Nicolucas/C-Scripts | 2608df5c2e635ad16f422877ff440af69f98f960 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
plt.style.use('science')
import os, sys, time
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/petsc-3.12.5/lib/petsc/bin/")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/se2wave/utils/python")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/processing/TEAR/PythonCodes/")
from se2waveload import *
from Lib_GeneralFunctions import *
from GeneratePaperFigs import *
from ModelIllustration import *
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 20
FontSizeControlFreak(SMALL_SIZE,MEDIUM_SIZE,BIGGER_SIZE)
from palettable.colorbrewer.diverging import PuOr_11_r as FieldColor
cmap = FieldColor.mpl_colormap
from matplotlib.colors import ListedColormap
import matplotlib.lines as mlines
from palettable.cartocolors.qualitative import Safe_5 as LineColor
cmapProf = ListedColormap(LineColor.mpl_colors[:])
###################################################################
###################### Reference solution
###################################################################
pathRef = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/References/"
# Reference saved into a list of objects
RefList = [SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-0.txt", "0km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-1.txt", "2km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-2.txt", "4km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-3.txt", "6km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-4.txt", "8km"),
]
# Reference saved into a list of objects
RefListTPV = [TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-0.0e+00.txt", "0km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-2.0e+03.txt", "2km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-4.0e+03.txt", "4km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-6.0e+03.txt", "6km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-8.0e+03.txt", "8km"),
]
###################################################################
###################### Reference solution
###################################################################
# Figure 3
start_time = time.time()
fname = "step-{timestep:04}_wavefield.pbin"
path = "/import/freenas-m-03-geodynamics/jhayek/TEAR/Results/T2/Runs/TEAR46_Kos_T20_P3_025x025_A12phi65_Delta2.5_4s/"
i=4630
FieldFilename = os.path.join(path,fname.format(timestep=i))
MeshFilename = os.path.join(path, "default_mesh_coor.pbin")
se2_coor = se2wave_load_coordinates(MeshFilename)
FileList = glob(os.path.join(path,"step-{timestep}_wavefield.pbin".format(timestep="*")))
l = [i.replace(os.path.join(path,'step-'),'').replace('_wavefield.pbin','') for i in FileList]
TimeStepVal, LCoorX, LCoorY, LFieldX, LFieldY, LFieldvelX, LFieldvelY = ExtractFields(FieldFilename, se2_coor)
FolderProfilesPath = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/CorrectedSimulations/20220325/"
DataProfile = LoadPickleFile(Filename = "TEAR46_Kos_T20_P3_025x025_A12phi65_Delta2.5_4s-Tilt20.0-P3-TPList_t4630_d62.5.pickle",FolderPath = FolderProfilesPath)
x0,y0 = 7350,2675
InsetAxis = [x0-200,x0+200,y0-200,y0+200]
F1, ax = Plot4KomaSetup(LCoorX, LCoorY, LFieldX, LFieldvelX,
["X-Component Displacement ", "X-Component Displacement [m]"],
TimeStepVal,InsetAxis,
cmap=cmap, rasterized=True)
del x0,y0,InsetAxis
# Tilted case plotting
iidx = 0
for iidx,Test1 in enumerate(DataProfile):
ax[0].plot(Test1.Time, Test1.DispX, color= cmapProf.colors[iidx], linewidth=1.5, zorder=iidx)
ax[1].plot(Test1.Time, Test1.VelX, color= cmapProf.colors[iidx], linewidth=1.5, zorder=iidx)
ax[0].set_xlabel("time [s]")
#F1.suptitle("Tilting (20deg) Kostrov simulation")
[item.PlotReference(ax[0], "Slip", filtering=False) for item in RefList]
[item.PlotReference(ax[1], "SlipRate", filtering=False) for item in RefList]
Format_LabelsOnFig_formatAxis(F1, ax[:2],inverted=True, ncols = 3, HeightBbox=1.2)
LabelizeAxisList(ax,Pos=[0.9, 0.9],fontsize=BIGGER_SIZE)
print("Saving Figure...")
OutFile = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/Works/se2dr_Paper/Illustrations/FinalFigures/F{}.pdf"
F1.savefig(OutFile.format("3"))
OutFile = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/Works/se2dr_Paper/Illustrations/FinalFigures/F{}.png"
F1.savefig(OutFile.format("3")) | 41.243243 | 159 | 0.68851 |
66bba8495cc9b2de4fa5d89e4f271bf43563f4b0 | 3,560 | py | Python | setup.py | fkie/rosrepo | 13cdf89e32f0c370d106a61540b0cd102675daf9 | [
"Apache-2.0"
] | 5 | 2016-09-06T08:02:10.000Z | 2018-06-10T20:45:21.000Z | setup.py | fkie/rosrepo | 13cdf89e32f0c370d106a61540b0cd102675daf9 | [
"Apache-2.0"
] | 2 | 2019-03-11T21:44:50.000Z | 2020-03-17T09:20:47.000Z | setup.py | fkie/rosrepo | 13cdf89e32f0c370d106a61540b0cd102675daf9 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Rhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import fastentrypoints
from setuptools import setup, __version__ as setuptools_version
import os
import sys
srcdir = os.path.normpath(os.path.join(os.path.dirname(__file__), "src"))
if os.path.isfile(os.path.join(srcdir, "rosrepo", "__init__.py")) and os.path.isfile(os.path.join(srcdir, "rosrepo", "main.py")):
sys.path.insert(0, srcdir)
else:
sys.stderr.write("This script is supposed to run from the rosrepo source tree")
sys.exit(1)
from rosrepo import __version__ as rosrepo_version
install_requires = ["catkin_pkg", "catkin_tools", "python-dateutil", "pygit2", "requests", "rosdep", "pyyaml"]
extras_require = {}
# The following code is a somewhat barbaric attempt to get conditional
# dependencies that works on setuptools versions before 18.0 as well:
if int(setuptools_version.split(".", 1)[0]) < 18:
if sys.version_info[0] < 3:
install_requires.append("futures")
if sys.version_info[:2] < (3, 5):
install_requires.append("scandir")
# Unfortunately, the fake conditional dependencies do not work with
# the caching mechanism of bdist_wheel, so if you want to create wheels,
# use at least setuptools version 18
assert "bdist_wheel" not in sys.argv
else:
# We have a reasonably modern setuptools version
from distutils.version import StrictVersion as Version
if Version(setuptools_version) >= Version("36.2"):
# Starting with setuptools 36.2, we can do proper conditional
# dependencies "PEP 508 style", the way God intended
install_requires.append("futures ; python_version<'3'")
install_requires.append("scandir ; python_version<'3.5'")
else:
# No proper conditional dependencies, but we can resort to some
# trickery and get the job done nevertheless
extras_require[":python_version<'3'"] = ["futures"]
extras_require[":python_version<'3.5'"] = ["scandir"]
setup(
name = "rosrepo",
description = "Manage ROS workspaces with multiple Gitlab repositories",
author = "Timo Rhling",
author_email = "timo.roehling@fkie.fraunhofer.de",
license = "Apache Software License",
keywords = ["catkin", "ROS", "Git"],
packages = ["rosrepo"],
package_dir = {"": "src"},
data_files = [("share/bash-completion/completions", ["bash/rosrepo"])],
version = rosrepo_version,
install_requires = install_requires,
extras_require = extras_require,
test_suite = "nose.collector",
entry_points = {
"console_scripts": ["rosrepo = rosrepo.main:main"]
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Version Control",
"Programming Language :: Python",
]
)
| 40 | 129 | 0.692416 |
66bcb0ae9b3366b6b0c297fee8c32430261239e3 | 2,948 | py | Python | structural/decorator_and_proxy/example/proxy.py | BruceWW/python_designer_pattern | c5f8b5ee32c8984401b4a217fa35364170331063 | [
"Apache-2.0"
] | 1 | 2020-08-29T09:17:12.000Z | 2020-08-29T09:17:12.000Z | structural/decorator_and_proxy/example/proxy.py | BruceWW/python_design_pattern | c5f8b5ee32c8984401b4a217fa35364170331063 | [
"Apache-2.0"
] | null | null | null | structural/decorator_and_proxy/example/proxy.py | BruceWW/python_design_pattern | c5f8b5ee32c8984401b4a217fa35364170331063 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Date : 2020/8/30
# @Author : Bruce Liu /Lin Luo
# @Mail : 15869300264@163.com
def trans(source_card: Card, target_card: Card, trans_num: int):
    """Transfer ``trans_num`` from ``source_card`` to ``target_card``.

    Prints both balances before and after, then performs the transfer by
    storing the amount on ``source_card.operator_num`` and evaluating
    ``target_card + source_card`` -- the ``Card`` class (defined elsewhere)
    presumably overloads ``+`` to carry out the transaction; TODO confirm.

    :param source_card: card the amount is debited from
    :param target_card: card the amount is credited to
    :param trans_num: amount to transfer
    :return: None -- all output goes to stdout
    """
    # NOTE(review): this message hard-codes "100" even though the actual
    # amount is ``trans_num``.
    print(f'trans 100 from {source_card.name} to {target_card.name}')
    print(f'surplus of source_card: {source_card.name} before trans: {source_card.surplus}')
    print(f'surplus of target_card: {target_card.name} before trans: {target_card.surplus}')
    # Store the amount on the source card; ``+`` executes the transfer.
    source_card.operator_num = trans_num
    res = target_card + source_card
    print(f'transfer result: {res}')
    print(f'surplus of source_card: {source_card.name} after trans: {source_card.surplus}')
    print(f'surplus of target_card: {target_card.name} after trans: {target_card.surplus}')
if __name__ == '__main__':
    # Demo scenario.  The original non-ASCII comments were garbled in this
    # copy; the notes below are reconstructed in English from the visible
    # calls.  Card's constructor arguments appear to be
    # (name, flag, limit, surplus) -- Card is defined elsewhere; TODO confirm.
    card_1 = Card('card_1', False, 100000, 10000)
    card_2 = Card('card_2', True, 1000, 0)
    card_3 = Card('card_3', True, 10000, 100)
    # Transfer 100 from card_2 to card_1.
    trans(card_2, card_1, 100)
    print()
    # Transfer 2000 from card_1 to card_3.
    trans(card_1, card_3, 2000)
    print()
    # Transfer 999 from card_1 to card_2.
    trans(card_1, card_2, 999)
    print()
    # Transfer 2 from card_1 to card_2.
    trans(card_1, card_2, 2)
    print()
    # Transfer 10000 from card_3 to card_1.
    trans(card_3, card_1, 10000)
66bccd1b00412b945cbbdb0f6a0be3ab3a3ef37f | 158 | py | Python | tests/cli.py | joesitton/Ciphey | 862555f13e3915428a2f4ada5538fdf0be77ffcd | [
"MIT"
] | 9,908 | 2020-06-06T01:06:50.000Z | 2022-03-31T21:22:57.000Z | tests/cli.py | joesitton/Ciphey | 862555f13e3915428a2f4ada5538fdf0be77ffcd | [
"MIT"
] | 423 | 2020-05-30T11:44:37.000Z | 2022-03-18T03:15:30.000Z | tests/cli.py | joesitton/Ciphey | 862555f13e3915428a2f4ada5538fdf0be77ffcd | [
"MIT"
] | 714 | 2020-06-09T20:24:41.000Z | 2022-03-29T15:28:53.000Z | import subprocess
from sys import exit
result = subprocess.check_output(["ciphey", "-q", "-t 'hello'"])
if "hello" in result:
exit(0)
else:
exit(1)
| 15.8 | 64 | 0.651899 |
66bd2091216a58b01f3847f7b8145c69c89e49b7 | 13,057 | py | Python | macro_benchmark/SegLink/seglink/unit_tests.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 180 | 2018-09-20T07:27:40.000Z | 2022-03-19T07:55:42.000Z | macro_benchmark/SegLink/seglink/unit_tests.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 80 | 2018-09-26T18:55:56.000Z | 2022-02-10T02:03:26.000Z | macro_benchmark/SegLink/seglink/unit_tests.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 72 | 2018-08-30T00:49:15.000Z | 2022-02-15T23:22:40.000Z | import math
import os
import tensorflow as tf
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import ops
import utils
import model_fctd
import data
import config
import visualizations as vis
FLAGS = tf.app.flags.FLAGS
if __name__ == '__main__':
# test_encode_decode_synth_data()
test_encode_decode_real_data()
# test_clip_rboxes()
# test_data_loading_and_preprocess()
# test_max_pool_on_odd_sized_maps()
# test_decode_combine_rboxes()
| 34.726064 | 103 | 0.635138 |
66bdffeb1d31a5333d1015ec0693dc331a8aaed7 | 1,432 | py | Python | setup.py | thefossgeek/packer.py | deda7a708e1968f6a206a939e97149c7aefc1c02 | [
"Apache-2.0"
] | 24 | 2018-03-24T00:06:04.000Z | 2022-01-29T19:25:32.000Z | setup.py | thefossgeek/packer.py | deda7a708e1968f6a206a939e97149c7aefc1c02 | [
"Apache-2.0"
] | 7 | 2018-03-24T00:12:06.000Z | 2021-07-01T23:29:28.000Z | setup.py | thefossgeek/packer.py | deda7a708e1968f6a206a939e97149c7aefc1c02 | [
"Apache-2.0"
] | 7 | 2018-10-10T00:36:25.000Z | 2022-01-27T15:02:17.000Z | """
Copyright 2018 Matthew Aynalem
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.core import setup
from setuptools import find_packages
setup(
name='packer.py',
version='0.3.0',
author='Matthew Aynalem',
author_email='maynalem@gmail.com',
packages=['packerpy'],
url='https://github.com/mayn/packer.py',
license='Apache License 2.0',
description='packer.py - python library to run hashicorp packer CLI commands',
keywords="hashicorp packer",
long_description=open('README.rst').read(),
install_requires=[
],
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 34.095238 | 82 | 0.692039 |
66be0ddd5abfb03dffd9214bd347839460bf60b7 | 39,732 | py | Python | pyreach/impl/logs_directory_client_test.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/impl/logs_directory_client_test.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/impl/logs_directory_client_test.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for logs_directory_client."""
import json
import os
import queue
import tempfile
from typing import Callable, Optional, List, Union
import unittest
from pyreach import core
from pyreach.common.python import types_gen
from pyreach.impl import logs_directory_client
from pyreach.impl import playback_client
from pyreach.impl import playback_client_test
from pyreach.impl import snapshot_impl
from pyreach.impl import utils
if __name__ == "__main__":
unittest.main()
| 39.652695 | 80 | 0.55892 |
66bff38e64bc42b7572591b13e17cd3a431e4073 | 1,007 | py | Python | SoftLayer/CLI/file/duplicate_convert_status.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/CLI/file/duplicate_convert_status.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/CLI/file/duplicate_convert_status.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | """Get status for split or move completed percentage of a given file duplicate volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
| 32.483871 | 112 | 0.713009 |
66c4abe639069bea0f557f4dba81d69a1839cf18 | 392 | py | Python | apps/saas/forms.py | lucaslucyk/sigec | cdf65868e2f8ead35b005603611fcd20446633c7 | [
"MIT"
] | null | null | null | apps/saas/forms.py | lucaslucyk/sigec | cdf65868e2f8ead35b005603611fcd20446633c7 | [
"MIT"
] | 7 | 2020-02-12T03:10:01.000Z | 2021-06-10T19:30:50.000Z | apps/saas/forms.py | lucaslucyk/sigec | cdf65868e2f8ead35b005603611fcd20446633c7 | [
"MIT"
] | null | null | null | from django import forms
#from pagedown.widgets import PagedownWidget
from apps.saas.models import Offer | 21.777778 | 69 | 0.742347 |
66c4c0ab19cb9fa1cb71b15b0da8a32e24b51bb6 | 5,491 | py | Python | Linuxu.py | Jefferson-Hsu/Linuxu-shell | 2bbc42248e05ac01f8d3466479bb8106833c7ab1 | [
"MIT"
] | 1 | 2022-03-04T05:53:33.000Z | 2022-03-04T05:53:33.000Z | Linuxu.py | Jefferson-Hsu/Linuxu-shell | 2bbc42248e05ac01f8d3466479bb8106833c7ab1 | [
"MIT"
] | null | null | null | Linuxu.py | Jefferson-Hsu/Linuxu-shell | 2bbc42248e05ac01f8d3466479bb8106833c7ab1 | [
"MIT"
] | null | null | null | #library
import os
#string aphoto
print(" _ _ _ _ __ __ _ _ .____ .__ ")
print("| | | | ___| | | __\\ \\ / /__ _ __| | __| | | | |__| ____ __ _____ _____ __ ")
print("| |_| |/ _ \\ | |/ _ \\ \\ /\\ / / _ \\| '__| |/ _` | | | | |/ \| | \ \/ / | \ ")
print("| _ | __/ | | (_) \\ V V / (_) | | | | (_| | | |___| | | \ | /> <| | / ")
print("|_| |_|\\___|_|_|\\___/ \\_/\\_/ \\___/|_| |_|\\__,_| |_______ \__|___| /____//__/\_ \____/ ")
print(" ")
print(" ")
print(" ")
#password & user name
join_key=3
again_key=4
name="XuFaxin"
password="Xinxin080502"
print("--------------------------------------------------------------------------------------------------------------------------------------------")
input_name=input("Please type the user name: ")
print("--------------------------------------------------------------------------------------------------------------------------------------------")
input_password=input("Please type the password: ")
print("--------------------------------------------------------------------------------------------------------------------------------------------")
print("welcome to Linuxu system!!!")
print(" ")
while(join_key==3):
if input_name=="XuFaxin" and input_password=="Xinxin080502":
print(" ")
print(" ")
else:
print("Bye,you are not user!")
break
#command shell
command=input("XuFaxin@computer% ")
#root command
if(command=="root"):
print(" ")
print("you are rooter!")
print(" ")
print("But don't be happy too soon")
print(" ")
print("-----------------------------------------------------------------------------------------------------------------------------------")
print(" In the world of Linuxu XuFaxin is god!")
print("-----------------------------------------------------------------------------------------------------------------------------------")
print(" ")
#Calculator command
if(command=="math"):
print("Develop by XuFaxin")
counts=3
while counts>0:
str1=input("First number: ")
str2=input("Second number:")
X=int(str1)
Y=int(str2)
print(X+Y)
print(X-Y)
print(X*Y)
print(X/Y)
print(X**Y)
print(X//Y)
break
#game command
if(command=="game"):
print(" ")
print("Welcome to XuFaxin's guess number game!")
print(" ")
print("You have three chances")
print(" ")
print("Guess an integer between 1 and 10")
print(" ")
print("develop by XuFaxin")
print(" ")
print(" ")
import random
answer=random.randint(1,10)
counts=3
while counts>0:
temp=input("Guess a number: ")
guess=int(temp)
if guess==answer:
print(" ")
print("Win")
print(" ")
print("Win!!! But no pay! HAHA!")
else:
if guess>0:
print(" ")
print("Big!")
print(" ")
else:
print(" ")
print("small!")
counts=counts-1
#clear command
if(command=="clear"):
os.system( 'cls' )
os.system("clear")
#list command
if(command=="ls"):
print("-------------------------------------------------------------------------------------------------------------------------------")
print(" ||game|| ||math|| ")
print("-------------------------------------------------------------------------------------------------------------------------------")
#exit command
if(command=="exit"):
print(" ")
print("See you again!")
break | 44.282258 | 152 | 0.24094 |
66c7e494275971e9a3a3aa777ced7402edea752a | 1,237 | py | Python | src/test.py | williamyang1991/TET-GAN | bdfca141fc14c5917fd9be8d2bc23870f9ad3288 | [
"MIT"
] | 86 | 2019-01-02T06:20:09.000Z | 2022-03-23T01:16:32.000Z | src/test.py | williamyang1991/TET-GAN | bdfca141fc14c5917fd9be8d2bc23870f9ad3288 | [
"MIT"
] | 5 | 2019-01-22T06:18:26.000Z | 2021-12-16T02:01:34.000Z | src/test.py | williamyang1991/TET-GAN | bdfca141fc14c5917fd9be8d2bc23870f9ad3288 | [
"MIT"
] | 24 | 2019-01-03T09:36:54.000Z | 2021-12-14T10:04:11.000Z | from options import TestOptions
import torch
from models import TETGAN
from utils import load_image, to_data, to_var, visualize, save_image
import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if __name__ == '__main__':
main()
| 25.770833 | 68 | 0.609539 |
66cc342e6fa18c2dd06d530c8ed54f8e34f04274 | 1,853 | py | Python | scripts/bulkLoadUrls.py | conveyal/gtfs-data-manager | e7269fc1660f1816da269b1c116b43bdf758900b | [
"MIT"
] | 25 | 2015-02-11T19:20:07.000Z | 2021-03-10T07:53:29.000Z | scripts/bulkLoadUrls.py | conveyal/gtfs-data-manager | e7269fc1660f1816da269b1c116b43bdf758900b | [
"MIT"
] | 53 | 2015-01-07T20:30:56.000Z | 2016-10-10T12:47:22.000Z | scripts/bulkLoadUrls.py | conveyal/gtfs-data-manager | e7269fc1660f1816da269b1c116b43bdf758900b | [
"MIT"
] | 3 | 2015-01-03T10:17:34.000Z | 2015-11-10T10:44:27.000Z | #!/usr/bin/python
# load many feeds to the GTFS data manager, from a csv with fields name and url
# usage: bulkLoadFeeds.py file.csv http://server.example.com/
import csv
from getpass import getpass
from sys import argv
import json
from cookielib import CookieJar
import urllib2
from urllib import urlencode
if len(argv) != 3:
print 'usage: %s file.csv http://gtfs-data-manager.example.com' % argv[0]
server = argv[2]
with open(argv[1]) as f:
reader = csv.DictReader(f)
# log in to the server
print 'Please authenticate'
uname = raw_input('username: ')
pw = getpass('password: ')
# strip trailing slash to normalize url
server = server if not server.endswith('/') else server[:-1]
# cookie handling
# http://www.techchorus.net/using-cookie-jar-urllib2
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# authenticate
opener.open(server + '/authenticate', urlencode(dict(username=uname, password=pw)))
# choose feed collection
colls = json.load(opener.open(server + '/api/feedcollections'))
print 'choose a feed collection: '
for i in xrange(len(colls)):
print '%s. %s' % (i + 1, colls[i]['name'])
while True:
try:
coll = colls[int(raw_input('> ')) - 1]
except ValueError:
continue
else:
break
# load each feed
for feed in reader:
data = dict(
name = feed['name'],
url = feed['url'],
isPublic = True,
autofetch = True,
# every day
feedCollection = coll
)
# http://stackoverflow.com/questions/3290522
req = urllib2.Request(server + '/api/feedsources/', json.dumps(data), {'Content-Type': 'application/json'})
opener.open(req)
| 25.736111 | 115 | 0.611981 |
66ce81273371c8d4fdeb7dac39c7d81c55ecac89 | 5,962 | py | Python | EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [
"BSD-2-Clause"
] | 4 | 2019-04-22T11:43:47.000Z | 2020-09-16T00:28:15.000Z | EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [
"BSD-2-Clause"
] | 34 | 2019-07-01T09:11:00.000Z | 2022-03-30T13:35:43.000Z | EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [
"BSD-2-Clause"
] | 1 | 2020-09-16T00:28:17.000Z | 2020-09-16T00:28:17.000Z | import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
| 32.939227 | 122 | 0.548306 |
66d0333de9cb88854cae7ea5468d3e9e83ace47c | 953 | py | Python | quokka/ext/weasyprint.py | yencchen/quokka_epus | d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5 | [
"MIT"
] | 1 | 2020-10-31T03:57:07.000Z | 2020-10-31T03:57:07.000Z | quokka/ext/weasyprint.py | yencchen/quokka_epus | d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5 | [
"MIT"
] | null | null | null | quokka/ext/weasyprint.py | yencchen/quokka_epus | d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import logging
from flask import url_for
logger = logging.getLogger()
try:
from flask_weasyprint import render_pdf
import_error = False
except (ImportError, OSError) as e:
# print("""
# Error importing flask-weasyprint!
# PDF support is temporarily disabled.
# Manual dependencies may need to be installed.
# See,
# `http://weasyprint.org/docs/install/#by-platform`_
# `https://github.com/Kozea/WeasyPrint/issues/79`_
# """ + str(e))
import_error = True
| 25.756757 | 74 | 0.684155 |
66d0fa4f73c90e59d6dc87d8a6c39b035c3b58f1 | 392 | py | Python | jupyter_server_terminals/__init__.py | blink1073/jupyter_server_terminals | cc0363421ab50fded26c8519ea4694bf1a391fce | [
"BSD-3-Clause-Clear"
] | 3 | 2021-12-30T23:55:47.000Z | 2022-02-18T01:14:54.000Z | jupyter_server_terminals/__init__.py | blink1073/jupyter_server_terminals | cc0363421ab50fded26c8519ea4694bf1a391fce | [
"BSD-3-Clause-Clear"
] | 5 | 2021-12-26T21:27:11.000Z | 2022-03-03T11:37:04.000Z | jupyter_server_terminals/__init__.py | blink1073/jupyter_server_terminals | cc0363421ab50fded26c8519ea4694bf1a391fce | [
"BSD-3-Clause-Clear"
] | 4 | 2021-12-26T21:25:45.000Z | 2022-01-27T02:47:10.000Z | from ._version import __version__ # noqa:F401
try:
from .app import TerminalsExtensionApp
except ModuleNotFoundError:
import warnings
warnings.warn("Could not import submodules")
| 21.777778 | 59 | 0.663265 |
66d19b6566c778e3d204fad20cbbd324cf9a6a61 | 5,256 | py | Python | CommandsToFunction.py | destruc7i0n/CommandsToFunction | f1c29c6280524c54cc5876b966c1ff36ab1c2d27 | [
"MIT"
] | 1 | 2018-03-10T21:09:04.000Z | 2018-03-10T21:09:04.000Z | CommandsToFunction.py | destruc7i0n/CommandsToFunction | f1c29c6280524c54cc5876b966c1ff36ab1c2d27 | [
"MIT"
] | null | null | null | CommandsToFunction.py | destruc7i0n/CommandsToFunction | f1c29c6280524c54cc5876b966c1ff36ab1c2d27 | [
"MIT"
] | null | null | null | # By TheDestruc7i0n https://thedestruc7i0n.ca
# MrGarretto for the code for traversing the command block chain https://mrgarretto.com
import mcplatform
import codecs
__version__ = "V1.4.1"
displayName = "Commands to Function"
inputs = (
("Converts a command block chain into a function.", "label"),
("The filter also includes a polyfill for conditional commands.", "label"),
("Select 1 repeating command block.", "label"),
("Ask for file save", True),
("If above is not checked, it will print the commands to the console.", "label"),
("Area effect cloud tag", ("string", "value=cond")),
("The above sets the tag that the area effect cloud will have, change if you have multiple functions.", "label"),
("Please ensure that there is a SuccessCount dummy objective in the world if you're using conditional command blocks.", "label"),
("Based off a filter by MrGarretto.", "label"),
("By TheDestruc7i0n: https://thedestruc7i0n.ca/", "label"),
)
| 39.818182 | 180 | 0.562976 |
66d42f1fdcd91d122cd938babcc3fe924510d04e | 2,147 | py | Python | src/admin/godmode/actions/base.py | aimanow/sft | dce87ffe395ae4bd08b47f28e07594e1889da819 | [
"Apache-2.0"
] | 280 | 2016-07-19T09:59:02.000Z | 2022-03-05T19:02:48.000Z | godmode/actions/base.py | YAR-SEN/GodMode2 | d8a79b45c6d8b94f3d2af3113428a87d148d20d0 | [
"WTFPL"
] | 3 | 2016-07-20T05:36:49.000Z | 2018-12-10T16:16:19.000Z | godmode/actions/base.py | YAR-SEN/GodMode2 | d8a79b45c6d8b94f3d2af3113428a87d148d20d0 | [
"WTFPL"
] | 20 | 2016-07-20T10:51:34.000Z | 2022-01-12T23:15:22.000Z | import json
from flask import g, request, render_template
from flask.views import View
from godmode import logging
from godmode.acl import ACL
from godmode.audit_log import audit_log
from godmode.exceptions import AccessDenied
log = logging.getLogger(__name__)
| 26.182927 | 98 | 0.583605 |
66d880a9b64fd73b407a720c9fa6817d2609e5bf | 16,001 | py | Python | forever/Warframe.py | dss285/4ever | bd6f70f92d76d43342da401562f2c504adaf3867 | [
"MIT"
] | null | null | null | forever/Warframe.py | dss285/4ever | bd6f70f92d76d43342da401562f2c504adaf3867 | [
"MIT"
] | null | null | null | forever/Warframe.py | dss285/4ever | bd6f70f92d76d43342da401562f2c504adaf3867 | [
"MIT"
] | null | null | null | import discord
import asyncio
import time
import aiohttp
import re
import pathlib
import os
import json
from bs4 import BeautifulSoup
from datetime import datetime
from models.UpdatedMessage import UpdatedMessage
from models.EmbedTemplate import EmbedTemplate
from models.BotMention import BotMention
from forever import Utilities
| 41.778068 | 159 | 0.645272 |
66d8ba6f365049a80533d4986a5c2cf0bb77bfb0 | 2,561 | py | Python | config/jupyter/jupyterhub_config.py | mhwasil/jupyterhub-on-gcloud | 9cfe935772d7599fa36c5b998cebb87c17e24277 | [
"MIT"
] | 3 | 2018-10-06T20:35:08.000Z | 2019-03-02T08:04:52.000Z | config/jupyter/jupyterhub_config.py | mhwasil/jupyterhub-on-gcloud | 9cfe935772d7599fa36c5b998cebb87c17e24277 | [
"MIT"
] | 4 | 2019-05-15T11:36:43.000Z | 2019-07-23T09:34:45.000Z | config/jupyter/jupyterhub_config.py | mhwasil/jupyterhub-on-gcloud | 9cfe935772d7599fa36c5b998cebb87c17e24277 | [
"MIT"
] | 2 | 2020-01-09T21:03:44.000Z | 2020-11-22T16:47:00.000Z | c = get_config()
c.JupyterHub.ip = u'127.0.0.1'
c.JupyterHub.port = 8000
c.JupyterHub.cookie_secret_file = u'/srv/jupyterhub/jupyterhub_cookie_secret'
c.JupyterHub.db_url = u'/srv/jupyterhub/jupyterhub.sqlite'
#c.JupyterHub.proxy_auth_token = u'/srv/jupyterhub/proxy_auth_token'
c.ConfigurableHTTPProxy.auth_token = u'/srv/jupyterhub/proxy_auth_token'
c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
c.SystemdSpawner.user_workingdir = '/home/{USERNAME}'
#c.JupyterHub.config_file = '/home/admin/jupyterhub_config.py'
# Limit memory and cpu usage for each user
c.SystemdSpawner.mem_limit = '0.5G'
c.SystemdSpawner.cpu_limit = 0.5
# create private /tmp to isolate each user info
c.SystemdSpawner.isolate_tmp = True
# Disable or enable user sudo
c.SystemdSpawner.disable_user_sudo = False
# Readonly
c.SystemdSpawner.readonly_paths = None
# Readwrite path
#c.SystemdSpawner.readwrite_paths = None
# use jupyterlab
c.Spawner.cmd = ['jupyter-labhub']
c.Spawner.default_url = '/tree'
# ser default_shell
c.SystemdSpawner.default_shell = '/bin/bash'
c.Authenticator.admin_users = {'admin', 'mrc-grader'}
c.Authenticator.whitelist = {'admin', 'mhm_wasil', 'instructor1',
'instructor2', 'student1', 'student2', 'student3',
'mrc-grader', 'wtus-grader'}
c.LocalAuthenticator.group_whitelist = {'mrc-group'}
#c.LocalAuthenticator.group_whitelist = {'mrc-group', 'wtus-group'}
# sionbg and willingc have access to a shared server:
c.JupyterHub.load_groups = {
'mrc-group': [
'instructor1',
'instructor2'
]
#,
#'wtus-student-group': [
# 'instructor2'
#]
}
service_names = ['shared-mrc-notebook', 'shared-wtus-notebook']
service_ports = [9998, 9999]
group_names = ['mrc-group']
#group_names = ['mrc-student-group', 'wtus-student-group']
# start the notebook server as a service
c.JupyterHub.services = [
{
'name': service_names[0],
'url': 'http://127.0.0.1:{}'.format(service_ports[0]),
'command': [
'jupyterhub-singleuser',
'--group={}'.format(group_names[0]),
'--debug',
],
'user': 'mrc-grader',
'cwd': '/home/mrc-grader'
}
#,
#{
# 'name': service_names[1],
# 'url': 'http://127.0.0.1:{}'.format(service_ports[1]),
# 'command': [
# 'jupyterhub-singleuser',
# '--group={}'.format(group_names[1]),
# '--debug',
# ],
# 'user': 'wtus-grader',
# 'cwd': '/home/wtus-grader'
#}
]
| 31.617284 | 78 | 0.643108 |
66d95353965e38496015e85b754a89803b392d87 | 11,908 | py | Python | legacy/Environment.py | LaoKpa/reinforcement_trader | 1465731269e6d58900a28a040346bf45ffb5cf97 | [
"MIT"
] | 7 | 2020-09-28T23:36:40.000Z | 2022-02-22T02:00:32.000Z | legacy/Environment.py | LaoKpa/reinforcement_trader | 1465731269e6d58900a28a040346bf45ffb5cf97 | [
"MIT"
] | 4 | 2020-11-13T18:48:52.000Z | 2022-02-10T01:29:47.000Z | legacy/Environment.py | lzcaisg/reinforcement_trader | 1465731269e6d58900a28a040346bf45ffb5cf97 | [
"MIT"
] | 3 | 2020-11-23T17:31:59.000Z | 2021-04-08T10:55:03.000Z | import datetime
import warnings
import pandas as pd
import numpy as np
from MongoDBUtils import *
from scipy.optimize import fsolve
import pymongo
TRADING_FEE = 0.008
EARLIEST_DATE = datetime.datetime(2014, 10, 17)
LATEST_DATE = datetime.datetime(2019, 10, 17)
# In any cases, we shouldn't know today's and future value;
# ONLY PROVIDE CALCULATED RESULT
# Handled by Both Environment and Actors
| 36.527607 | 141 | 0.570037 |
66d9e2205d4a01f644f0a6147e2760e0d6b2de38 | 579 | py | Python | examples/Titanic/titanic.py | mlflow/mlflow-torchserve | 91663b630ef12313da3ad821767faf3fc409345b | [
"Apache-2.0"
] | 40 | 2020-11-13T02:08:10.000Z | 2022-03-27T07:41:57.000Z | examples/Titanic/titanic.py | Ideas2IT/mlflow-torchserve | d6300fb73f16d74ee2c7718c249faf485c4f3b62 | [
"Apache-2.0"
] | 23 | 2020-11-16T11:28:01.000Z | 2021-09-23T11:28:24.000Z | examples/Titanic/titanic.py | Ideas2IT/mlflow-torchserve | d6300fb73f16d74ee2c7718c249faf485c4f3b62 | [
"Apache-2.0"
] | 15 | 2020-11-13T10:25:25.000Z | 2022-02-01T10:13:20.000Z | import torch.nn as nn
| 30.473684 | 64 | 0.62867 |
66db0c7061bb9a75d8373490465f8ef60bcc3200 | 426 | py | Python | api/tacticalrmm/agents/migrations/0049_agent_agents_agen_monitor_df8816_idx.py | v2cloud/tacticalrmm | 12f599f9749985f66ff9b559c5e5abd36064b182 | [
"MIT"
] | null | null | null | api/tacticalrmm/agents/migrations/0049_agent_agents_agen_monitor_df8816_idx.py | v2cloud/tacticalrmm | 12f599f9749985f66ff9b559c5e5abd36064b182 | [
"MIT"
] | null | null | null | api/tacticalrmm/agents/migrations/0049_agent_agents_agen_monitor_df8816_idx.py | v2cloud/tacticalrmm | 12f599f9749985f66ff9b559c5e5abd36064b182 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-04-18 14:29
from django.db import migrations, models
| 23.666667 | 98 | 0.65493 |
66dcca39ba0172f5d72111b99f2df6a26ed3cb02 | 6,431 | py | Python | src/Datasets.py | fauxneticien/bnf_cnn_qbe-std | ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30 | [
"MIT"
] | 4 | 2021-03-26T17:18:59.000Z | 2022-03-21T18:28:56.000Z | src/Datasets.py | fauxneticien/bnf_cnn_qbe-std | ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30 | [
"MIT"
] | 1 | 2021-11-02T17:29:46.000Z | 2021-11-02T17:29:46.000Z | src/Datasets.py | fauxneticien/bnf_cnn_qbe-std | ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30 | [
"MIT"
] | 1 | 2020-11-11T05:04:55.000Z | 2020-11-11T05:04:55.000Z | import os
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from scipy.spatial.distance import cdist
import logging
| 51.448 | 133 | 0.640647 |
66de338a8afcfc34368f70df12c0187b512a7430 | 3,209 | py | Python | dmz/store.py | yuvipanda/edit-stats | fb096715f18df999b4af4fb116e6c4130f24c2ec | [
"MIT"
] | null | null | null | dmz/store.py | yuvipanda/edit-stats | fb096715f18df999b4af4fb116e6c4130f24c2ec | [
"MIT"
] | null | null | null | dmz/store.py | yuvipanda/edit-stats | fb096715f18df999b4af4fb116e6c4130f24c2ec | [
"MIT"
] | null | null | null | """Implements a db backed storage area for intermediate results"""
import sqlite3
| 38.662651 | 111 | 0.600499 |
66e356546289b5293424a7a6ad3ffb4afce031ec | 7,074 | py | Python | main.py | usdot-its-jpo-data-portal/metadata-query-function | 589e5df691fab82e264ce74196dd797b9eb17f5e | [
"Apache-2.0"
] | null | null | null | main.py | usdot-its-jpo-data-portal/metadata-query-function | 589e5df691fab82e264ce74196dd797b9eb17f5e | [
"Apache-2.0"
] | null | null | null | main.py | usdot-its-jpo-data-portal/metadata-query-function | 589e5df691fab82e264ce74196dd797b9eb17f5e | [
"Apache-2.0"
] | 1 | 2021-12-14T18:00:20.000Z | 2021-12-14T18:00:20.000Z | import boto3
import dateutil
import glob
import json
import logging
import os
import queue
import time
from queries import MetadataQueries
USE_LOCAL_DATA = True # whether to load data from S3 (false) or locally (true)
LOCAL_DATA_REPOSITORY = "s3data/usdot-its-cvpilot-public-data" # path to local directory containing s3 data
### Query to run
METADATA_QUERY = 'query13_listOfLogFilesBefore'
### Data source configuration settings
PREFIX_STRINGS = ["wydot/BSM/2018/12", "wydot/BSM/2019/01", "wydot/BSM/2019/02", "wydot/BSM/2019/03", "wydot/BSM/2019/04", "wydot/TIM/2018/12", "wydot/TIM/2019/01", "wydot/TIM/2019/02", "wydot/TIM/2019/03", "wydot/TIM/2019/04"]
S3_BUCKET = "usdot-its-cvpilot-public-data"
### Returns a list of records from a given file
### Returns filenames from an S3 list files (list_objects) query
if __name__ == "__main__":
lambda_handler(None, None)
| 46.235294 | 227 | 0.669918 |
66e36f3c188b5158455460f11322fdc4021ffe06 | 1,070 | py | Python | example_config/SecretConfig.py | axiegamingph-dev/discordaxieqrbot | fac9b3f325b98d21ece12445ec798c125d06f788 | [
"MIT"
] | null | null | null | example_config/SecretConfig.py | axiegamingph-dev/discordaxieqrbot | fac9b3f325b98d21ece12445ec798c125d06f788 | [
"MIT"
] | null | null | null | example_config/SecretConfig.py | axiegamingph-dev/discordaxieqrbot | fac9b3f325b98d21ece12445ec798c125d06f788 | [
"MIT"
] | 2 | 2022-01-13T18:45:26.000Z | 2022-03-03T11:50:43.000Z | Managers = ['Shim', 'Mike', 'Ryan', 'Kevin', 'Wessa', 'ser0wl']
# google spreedsheet id
ISKO_SPREADSHEET_ID = ''
# the list of names with discord ID
ISKO_DiscordAccount = 'DiscordAccount!A2:B100'
# the list of Names, ronin address, ronin private keys
# eg:
# Name | Address | Privatekey
# Isko1 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# Isko2 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# Isko3 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# note: Name should map to the ISKO_DiscordAccount values
ISKO_Accounts = 'Isko!A2:C100'
# list of names that can request qr code on behalf of that person.
# eg:
# Representative | IskoName
# Isko1 | Isko1
# Isko1 | Isko2
# this means Isko1 can request code for Isko1 and Isko2 using the !qrof Isko1 and !qrof Isko2.
ISKO_Representative = 'Representative!A2:B100'
# Put Your Discord Bot Token Here
DiscordBotToken_Prod = ''
DiscordBotToken_Test = ''
DiscordBotToken = DiscordBotToken_Prod
| 33.4375 | 94 | 0.699065 |
66e44acc59d85966cbb8120b35805a421dccdbf1 | 566 | py | Python | world/dominion/migrations/0011_organization_theories.py | stesla/arxcode | a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a | [
"MIT"
] | 5 | 2019-03-16T08:26:53.000Z | 2019-11-27T15:42:16.000Z | world/dominion/migrations/0011_organization_theories.py | stesla/arxcode | a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a | [
"MIT"
] | 7 | 2018-09-29T05:08:15.000Z | 2021-06-10T21:35:32.000Z | world/dominion/migrations/0011_organization_theories.py | stesla/arxcode | a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a | [
"MIT"
] | 7 | 2018-09-19T21:11:29.000Z | 2019-11-19T12:46:14.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-19 03:52
from __future__ import unicode_literals
from django.db import migrations, models
| 25.727273 | 108 | 0.637809 |
66e492eef799f5d354e84f2867ee89f9c4cd7b7a | 200 | py | Python | tests/button_test.py | almasgai/Drone | 1223375976baf79d0f4362d42287d1a4039ba1e9 | [
"MIT"
] | null | null | null | tests/button_test.py | almasgai/Drone | 1223375976baf79d0f4362d42287d1a4039ba1e9 | [
"MIT"
] | null | null | null | tests/button_test.py | almasgai/Drone | 1223375976baf79d0f4362d42287d1a4039ba1e9 | [
"MIT"
] | null | null | null | from gpiozero import Button
import os
from time import sleep
button = Button(2)
i = 0
while True:
if button.is_pressed:
print(i, ". I've been pressed")
i += 1
sleep(0.1)
| 15.384615 | 39 | 0.61 |
66e5419754e56410c068112926f27e01cdae86bb | 820 | py | Python | reprojection.py | ekrell/nir2watermap | 5253f2cde142a62103eb06fb2931c9aed6431211 | [
"MIT"
] | null | null | null | reprojection.py | ekrell/nir2watermap | 5253f2cde142a62103eb06fb2931c9aed6431211 | [
"MIT"
] | null | null | null | reprojection.py | ekrell/nir2watermap | 5253f2cde142a62103eb06fb2931c9aed6431211 | [
"MIT"
] | null | null | null | import rasterio
from rasterio.plot import show, reshape_as_raster, reshape_as_image, adjust_band
from rasterio import warp
import numpy as np
| 37.272727 | 91 | 0.680488 |
66e80248874252f8ee1fc31cfa1763523a5f99eb | 4,034 | py | Python | opentsdb/push_thread.py | razvandimescu/opentsdb-py | 61c15302468769121f94323493e88cb51efcea15 | [
"MIT"
] | 48 | 2016-12-27T10:11:41.000Z | 2021-11-15T16:05:24.000Z | opentsdb/push_thread.py | razvandimescu/opentsdb-py | 61c15302468769121f94323493e88cb51efcea15 | [
"MIT"
] | 8 | 2017-10-08T16:20:30.000Z | 2022-02-23T08:36:52.000Z | opentsdb/push_thread.py | razvandimescu/opentsdb-py | 61c15302468769121f94323493e88cb51efcea15 | [
"MIT"
] | 17 | 2017-10-01T01:14:55.000Z | 2021-11-15T16:05:24.000Z | from logging import getLogger
from queue import Empty
import threading
import random
import time
logger = getLogger('opentsdb-py')
| 32.015873 | 119 | 0.617005 |
66e8dfd4ed77fb442ea81a851f7a9c4e599b1de3 | 465 | py | Python | projects/generate_pdf/main.py | parth-patel-samarthview/batch_201901 | f407c1bf9575a01e8ddc507adb6f0574f8d2bc09 | [
"MIT"
] | 2 | 2019-03-17T07:20:24.000Z | 2019-03-31T05:47:09.000Z | projects/generate_pdf/main.py | parth-patel-samarthview/batch_201901 | f407c1bf9575a01e8ddc507adb6f0574f8d2bc09 | [
"MIT"
] | null | null | null | projects/generate_pdf/main.py | parth-patel-samarthview/batch_201901 | f407c1bf9575a01e8ddc507adb6f0574f8d2bc09 | [
"MIT"
] | 2 | 2019-01-28T13:09:48.000Z | 2019-03-17T07:20:37.000Z | from xlrd import open_workbook
wb = open_workbook(r"C:\Users\Lenovo\Documents\excel converter.xlsx")
for s in wb.sheets():
#print 'Sheet:',s.name
values = []
for row in range(s.nrows):
col_value = []
for col in range(s.ncols):
value = (s.cell(row,col).value)
try : value = str(int(value))
except : pass
col_value.append(value)
values.append(col_value)
print(values)
| 31 | 70 | 0.572043 |
66ebd223e34af9e0e97db29c5f0febdca09f52fb | 3,068 | py | Python | apitaxdrivers/Openstack.py | Apitax/Drivers | 35b2c2f4c8ce8b98615f42fc30f04111d7b9bffe | [
"Apache-2.0"
] | null | null | null | apitaxdrivers/Openstack.py | Apitax/Drivers | 35b2c2f4c8ce8b98615f42fc30f04111d7b9bffe | [
"Apache-2.0"
] | 4 | 2018-08-03T20:01:57.000Z | 2018-10-22T15:32:27.000Z | apitaxdrivers/Openstack.py | Apitax/Drivers | 35b2c2f4c8ce8b98615f42fc30f04111d7b9bffe | [
"Apache-2.0"
] | null | null | null | from apitax.drivers.Driver import Driver
from apitax.utilities.Files import getAllFiles
from apitax.ah.Options import Options
from pathlib import Path
from apitax.ah.Credentials import Credentials
from apitax.utilities.Json import read
from apitax.ah.State import State
from apitax.utilities.Files import getPath
| 35.264368 | 118 | 0.601695 |