Column schema (type and observed range; ⌀ marks nullable columns):

- hexsha: string, length 40
- size: int64, 5 – 2.06M
- ext: string, 11 classes
- lang: string, 1 class (Python)
- max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string, length 3 – 251
- max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string, length 4 – 130
- max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string, length 40 – 78
- max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list, length 1 – 10
- max_stars_count: int64, 1 – 191k ⌀
- max_issues_count: int64, 1 – 116k ⌀
- max_forks_count: int64, 1 – 105k ⌀
- the six *_event_min_datetime / *_event_max_datetime columns: string, length 24 ⌀
- content: string, length 1 – 1.05M
- avg_line_length: float64, 1 – 1.02M
- max_line_length: int64, 3 – 1.04M
- alphanum_fraction: float64, 0 – 1

| hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93e4b9ba2043f4d210f572091843a3b906a5ef27
| 1,492
|
py
|
Python
|
adminmgr/media/code/python/red1/myReducer2.2.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9
|
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/python/red2/myReducer2.2.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6
|
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/python/red2/myReducer2.2.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4
|
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
#!/usr/bin/python3
import sys
#import fileinput
from operator import itemgetter
myDictionary = {}
myList=[]
for line in sys.stdin:
    fields = line.strip().split("\t")
    batsman, bowler, wickets = fields[0], fields[1], int(fields[2])
    if batsman not in myDictionary:
        myDictionary[batsman] = {}
        myDictionary[batsman][bowler] = [0, 0]  # [wickets, number of balls faced]
    elif bowler not in myDictionary[batsman]:
        myDictionary[batsman][bowler] = [0, 0]
    myDictionary[batsman][bowler][0] += wickets
    myDictionary[batsman][bowler][1] += 1
# Drop batsman-bowler pairs with five or fewer deliveries between them.
for batsman in list(myDictionary):
    for bowler in list(myDictionary[batsman]):
        if myDictionary[batsman][bowler][1] <= 5:
            del myDictionary[batsman][bowler]
for batsman in myDictionary:
tempList = list(myDictionary[batsman].keys())
if len(tempList) > 0:
for bowler in myDictionary[batsman]:
myList.append((batsman, bowler, myDictionary[batsman][bowler][0], myDictionary[batsman][bowler][1]))
#newList = sorted(myList, key=itemgetter(1))
newList = sorted(myList, key=itemgetter(0))
newList = sorted(newList, key=itemgetter(3))
newList = sorted(newList, key=itemgetter(2), reverse=True)
for (batsman, bowler, wickets, deliveries) in newList:
print(batsman + ',' + bowler + ',' + str(wickets) + ',' + str(deliveries))
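# A minimal usage sketch (assumes tab-separated mapper output of the form
# "batsman<TAB>bowler<TAB>wicket_flag", one delivery per line):
#   $ cat mapper_output.txt | python3 myReducer2.2.py
# Each output row is "batsman,bowler,wickets,deliveries", ordered by wickets
# (descending), then deliveries, then batsman name.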
| 30.44898
| 121
| 0.663539
|
93e5a39d73ffdd336557527636051e03eddd2896
| 2,478
|
py
|
Python
|
tests/datastore_test.py
|
isouzasoares/needle-forms
|
1cdb097e1e9801d53607683aa6dcb645b1b82226
|
[
"MIT"
] | null | null | null |
tests/datastore_test.py
|
isouzasoares/needle-forms
|
1cdb097e1e9801d53607683aa6dcb645b1b82226
|
[
"MIT"
] | 3
|
2020-03-31T04:49:13.000Z
|
2021-04-30T21:03:00.000Z
|
tests/datastore_test.py
|
isouzasoares/needle-forms
|
1cdb097e1e9801d53607683aa6dcb645b1b82226
|
[
"MIT"
] | 2
|
2019-11-05T14:48:39.000Z
|
2020-03-26T20:17:42.000Z
|
from abstractions.datastore import Datastore
from unittest.mock import Mock
from google.cloud.datastore.entity import Entity
from unittest.mock import patch
import pytest
"""
#test_valid_information::ut
#test_with_invalid_information::ut
"""
| 34.416667
| 75
| 0.75222
|
93e5d68b70881e1e29365acb06e52f0fb4bc0b36
| 2,331
|
py
|
Python
|
neuro_logging/__init__.py
|
neuro-inc/neuro-logging
|
e3173a40d0e2559f113f1420ed8a3fd4a0e76dde
|
[
"Apache-2.0"
] | null | null | null |
neuro_logging/__init__.py
|
neuro-inc/neuro-logging
|
e3173a40d0e2559f113f1420ed8a3fd4a0e76dde
|
[
"Apache-2.0"
] | 50
|
2021-08-20T00:10:05.000Z
|
2022-02-21T16:44:46.000Z
|
neuro_logging/__init__.py
|
neuro-inc/neuro-logging
|
e3173a40d0e2559f113f1420ed8a3fd4a0e76dde
|
[
"Apache-2.0"
] | null | null | null |
import logging
import logging.config
import os
from importlib.metadata import version
from typing import Any, Union
from .trace import (
make_request_logging_trace_config,
make_sentry_trace_config,
make_zipkin_trace_config,
new_sampled_trace,
new_trace,
new_trace_cm,
notrace,
setup_sentry,
setup_zipkin,
setup_zipkin_tracer,
trace,
trace_cm,
)
__version__ = version(__package__)
__all__ = [
"init_logging",
"HideLessThanFilter",
"make_request_logging_trace_config",
"make_sentry_trace_config",
"make_zipkin_trace_config",
"notrace",
"setup_sentry",
"setup_zipkin",
"setup_zipkin_tracer",
"trace",
"trace_cm",
"new_sampled_trace",
"new_trace",
"new_trace_cm",
]
if "NP_LOG_LEVEL" in os.environ:
_default_log_level = logging.getLevelName(os.environ["NP_LOG_LEVEL"])
else:
_default_log_level = logging.WARNING
DEFAULT_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}
},
"filters": {
"hide_errors": {"()": f"{__name__}.HideLessThanFilter", "level": "ERROR"}
},
"handlers": {
"stdout": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "standard",
"stream": "ext://sys.stdout",
"filters": ["hide_errors"],
},
"stderr": {
"class": "logging.StreamHandler",
"level": "ERROR",
"formatter": "standard",
"stream": "ext://sys.stderr",
},
},
"root": {"level": _default_log_level, "handlers": ["stderr", "stdout"]},
}
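# A caller would typically activate this configuration with the standard
# dictConfig call (presumably what the init_logging() listed in __all__ does):
#   logging.config.dictConfig(DEFAULT_CONFIG)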
| 25.336957
| 86
| 0.607465
|
93e8008d69beb181243428fecbcab6a20eb6cce6
| 3,628
|
py
|
Python
|
quantrocket/houston.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | null | null | null |
quantrocket/houston.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | null | null | null |
quantrocket/houston.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | 1
|
2019-06-12T11:34:27.000Z
|
2019-06-12T11:34:27.000Z
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from .exceptions import ImproperlyConfigured
from quantrocket.cli.utils.output import json_to_cli
# Instantiate houston so that all callers can share a TCP connection (for
# performance's sake)
houston = Houston()
def ping():
"""
Pings houston.
Returns
-------
json
reply from houston
"""
response = houston.get("/ping")
houston.raise_for_status_with_json(response)
return response.json()
| 33.284404
| 88
| 0.651599
|
93e81c0784cf18fea2ad26a23da9cc7f264a20a2
| 3,778
|
py
|
Python
|
src/Sudoku/SudokuGenerator.py
|
andrea-pollastro/Sudoku
|
84d82c9a181ad87f782efe7489fa28da70993590
|
[
"MIT"
] | 1
|
2020-01-09T10:48:47.000Z
|
2020-01-09T10:48:47.000Z
|
src/Sudoku/SudokuGenerator.py
|
andrea-pollastro/Sudoku
|
84d82c9a181ad87f782efe7489fa28da70993590
|
[
"MIT"
] | null | null | null |
src/Sudoku/SudokuGenerator.py
|
andrea-pollastro/Sudoku
|
84d82c9a181ad87f782efe7489fa28da70993590
|
[
"MIT"
] | null | null | null |
from random import shuffle
from math import sqrt
from enum import Enum
from src.Sudoku.SudokuSolver import SudokuSolver
from src.Sudoku.Sudoku import Sudoku
"""
**** SUDOKU GENERATOR ****
Author: Andrea Pollastro
Date: September 2018
"""
| 35.641509
| 117
| 0.636051
|
93e8893408bea136d9cbb5e2c4e9cac3b5b0c2f9
| 15,753
|
py
|
Python
|
stanza/coptic.py
|
CopticScriptorium/stanza
|
a16b152fce3d2cc325b7d67e03952bd00c878fe3
|
[
"Apache-2.0"
] | null | null | null |
stanza/coptic.py
|
CopticScriptorium/stanza
|
a16b152fce3d2cc325b7d67e03952bd00c878fe3
|
[
"Apache-2.0"
] | null | null | null |
stanza/coptic.py
|
CopticScriptorium/stanza
|
a16b152fce3d2cc325b7d67e03952bd00c878fe3
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import random, os
from os.path import join as j
from collections import OrderedDict
import conllu
import torch
import pathlib
import tempfile
import depedit
import stanza.models.parser as parser
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.common import utils
from stanza.models.common.pretrain import Pretrain
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
PACKAGE_BASE_DIR = pathlib.Path(__file__).parent.absolute()
# Parser arguments -----------------------------------------------------------------------------------------------------
# These args are used by stanza.models.parser. Keys should always exactly match those you'd get from the dictionary
# obtained from running stanza.models.parser.parse_args(). The values below were selected through hyperoptimization.
DEFAULT_PARSER_ARGS = {
# general setup
'lang': 'cop',
'treebank': 'cop_scriptorium',
'shorthand': 'cop_scriptorium',
'data_dir': j(PACKAGE_BASE_DIR, 'data', 'depparse'),
'output_file': j(PACKAGE_BASE_DIR, 'coptic_data', 'scriptorium', 'pred.conllu'),
'seed': 1234,
'cuda': torch.cuda.is_available(),
'cpu': not torch.cuda.is_available(),
'save_dir': j(PACKAGE_BASE_DIR, "..", 'stanza_models'),
'save_name': None,
# word embeddings
'pretrain': True,
'wordvec_dir': j(PACKAGE_BASE_DIR, 'coptic_data', 'wordvec'),
'wordvec_file': j(PACKAGE_BASE_DIR, 'coptic_data', 'wordvec', 'word2vec', 'Coptic', 'coptic_50d.vec.xz'),
'word_emb_dim': 50,
'word_dropout': 0.3,
# char embeddings
'char': True,
'char_hidden_dim': 200,
'char_emb_dim': 50,
'char_num_layers': 1,
'char_rec_dropout': 0, # very slow!
# pos tags
'tag_emb_dim': 5,
'tag_type': 'gold',
# network params
'hidden_dim': 300,
'deep_biaff_hidden_dim': 200,
'composite_deep_biaff_hidden_dim': 100,
'transformed_dim': 75,
'num_layers': 3,
'pretrain_max_vocab': 250000,
'dropout': 0.5,
'rec_dropout': 0, # very slow!
'linearization': True,
'distance': True,
# training
'sample_train': 1.0,
'optim': 'adam',
'lr': 0.002,
'beta2': 0.95,
'max_steps': 20000,
'eval_interval': 100,
'max_steps_before_stop': 2000,
'batch_size': 1500,
'max_grad_norm': 1.0,
'log_step': 20,
# these need to be included or there will be an error when stanza tries to access them
'train_file': None,
'eval_file': None,
'gold_file': None,
'mode': None,
}
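# A caller can override any of these defaults before invoking the parser,
# e.g. (hypothetical values):
#   args = DEFAULT_PARSER_ARGS.copy()
#   args['lr'] = 0.001
#   args['batch_size'] = 1000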
# Custom features ------------------------------------------------------------------------------------------------------
# Params for controlling the custom features we're feeding the network
FEATURE_CONFIG = {
# BIOLU or BIO
'features': [
'foreign_word',
'morph_count',
'left_morph',
'entity',
],
'foreign_word_binary': True,
'morph_count_binary': False,
'entity_encoding_scheme': 'BIOLU',
'entity_dropout': 0.30,
}
# DepEdit preprocessor which removes gold morph data and makes a few other tweaks
PREPROCESSOR = depedit.DepEdit(config_file=j(PACKAGE_BASE_DIR, "coptic_data", "depedit", "add_ud_and_flat_morph.ini"),
options=type('', (), {"quiet": True, "kill": "both"}))
# Load a lexicon of foreign words and initialize a lemma cache
with open(j(PACKAGE_BASE_DIR, 'coptic_data', 'lang_lexicon.tab'), 'r', encoding="utf8") as f:
FOREIGN_WORDS = {x.split('\t')[0]: x.split('\t')[1].rstrip()
for x in f.readlines() if '\t' in x}
FW_CACHE = {}
# load known entities and sort in order of increasing token length
with open(j(PACKAGE_BASE_DIR, 'coptic_data', 'entities.tab'), 'r', encoding="utf8") as f:
KNOWN_ENTITIES = OrderedDict(sorted(
((x.split('\t')[0], x.split('\t')[1]) for x in f.readlines()),
key=lambda x: len(x[0].split(" "))
))
FEATURE_FUNCTIONS = {
'foreign_word': _add_foreign_word_feature,
'left_morph': _add_left_morph_feature,
'morph_count': _add_morph_count_feature,
'entity': _add_entity_feature,
}
# public api -----------------------------------------------------------------------------------------------------------
def train(train, dev, save_name=None):
"""Train a new stanza model.
:param train: either a conllu string or a path to a conllu file
:param dev: either a conllu string or a path to a conllu file
:param save_name: optional, a name for your model's save file, which will appear in 'stanza_models/'
"""
args = DEFAULT_PARSER_ARGS.copy()
feature_config = FEATURE_CONFIG.copy()
args['mode'] = 'train'
args['train_file'] = _read_conllu_arg(train, feature_config)
args['eval_file'] = _read_conllu_arg(dev, feature_config)
args['gold_file'] = _read_conllu_arg(dev, feature_config, gold=True)
if save_name:
args['save_name'] = save_name
parser.train(args)
def test(test, save_name=None):
"""Evaluate using an existing stanza model.
:param test: either a conllu string or a path to a conllu file
:param save_name: optional, a name for your model's save file, which will appear in 'stanza_models/'
"""
args = DEFAULT_PARSER_ARGS.copy()
feature_config = FEATURE_CONFIG.copy()
args['mode'] = "predict"
args['eval_file'] = _read_conllu_arg(test, feature_config)
args['gold_file'] = _read_conllu_arg(test, feature_config, gold=True)
if save_name:
args['save_name'] = save_name
return parser.evaluate(args)
| 35.320628
| 120
| 0.599314
|
93e9f8a0d79848804615cc209c301de7b2ddbead
| 151
|
py
|
Python
|
chap01/07.py
|
knuu/nlp100
|
5008d678d7c8d15057ac67fe68b0667657c39b29
|
[
"MIT"
] | 1
|
2015-09-11T10:33:42.000Z
|
2015-09-11T10:33:42.000Z
|
chap01/07.py
|
knuu/nlp100
|
5008d678d7c8d15057ac67fe68b0667657c39b29
|
[
"MIT"
] | null | null | null |
chap01/07.py
|
knuu/nlp100
|
5008d678d7c8d15057ac67fe68b0667657c39b29
|
[
"MIT"
] | null | null | null |
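# NOTE: the snippet omits the function definition; a minimal sketch consistent
# with NLP 100 exercise 07 (build the string "x時のyはz") is assumed here.
def makeSentence(x, y, z):
    return '{}時の{}は{}'.format(x, y, z)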
if __name__ == '__main__':
    ans = makeSentence(12, '気温', 22.4)
print(ans)
| 21.571429
| 41
| 0.576159
|
93ea9b725dcae36d582332e92813504608956ade
| 49
|
py
|
Python
|
src/fseq/__init__.py
|
tanioklyce/python-fseq
|
be0d5d895ead1b099dd0d47602520c9ebf1f449d
|
[
"MIT"
] | 3
|
2019-12-07T19:32:32.000Z
|
2021-12-27T05:19:26.000Z
|
src/fseq/__init__.py
|
tanioklyce/python-fseq
|
be0d5d895ead1b099dd0d47602520c9ebf1f449d
|
[
"MIT"
] | 3
|
2020-01-05T03:05:58.000Z
|
2021-04-24T06:28:16.000Z
|
src/fseq/__init__.py
|
tanioklyce/python-fseq
|
be0d5d895ead1b099dd0d47602520c9ebf1f449d
|
[
"MIT"
] | 4
|
2020-03-13T17:49:06.000Z
|
2022-03-14T01:26:28.000Z
|
__version__ = '0.1.0'
from .parser import parse
| 12.25
| 25
| 0.714286
|
93eaa3d71ba2d5f76f2ff30667b738af79cf0791
| 1,769
|
py
|
Python
|
pycraft/configuration.py
|
duaneking/pycraft
|
eb7fcc9ee44677555f667520b74fe053a3fb2392
|
[
"MIT"
] | null | null | null |
pycraft/configuration.py
|
duaneking/pycraft
|
eb7fcc9ee44677555f667520b74fe053a3fb2392
|
[
"MIT"
] | null | null | null |
pycraft/configuration.py
|
duaneking/pycraft
|
eb7fcc9ee44677555f667520b74fe053a3fb2392
|
[
"MIT"
] | null | null | null |
from os.path import expanduser
import sys
import json
'''
The configuration file will be stored as a json file
@example
{
    "window": {
        "w_dimension": 800,
        "h_dimension": 600,
        "resizeable": false
    }
}
'''
| 28.532258
| 94
| 0.692482
|
93ebca1d1f1083aaf12c3d720e9e95e6ae2564e1
| 11,432
|
py
|
Python
|
diff.py
|
JohnAgapeyev/binary_diff
|
4e8a1eb9540af134375f171e4a5a8781d042043d
|
[
"MIT"
] | null | null | null |
diff.py
|
JohnAgapeyev/binary_diff
|
4e8a1eb9540af134375f171e4a5a8781d042043d
|
[
"MIT"
] | null | null | null |
diff.py
|
JohnAgapeyev/binary_diff
|
4e8a1eb9540af134375f171e4a5a8781d042043d
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import sys
import os
import getopt
import csv
import json
import itertools
import zipfile
import tarfile
import binwalk
import collections
from heapq import nsmallest
from collections import defaultdict
import tlsh
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing.dummy import Pool
from sklearn.cluster import *
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
pool = Pool()
#Values are from the TLSH paper
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:m:bn:t", ["help", "directory", "metadata", "binwalk", "number", "test"])
except getopt.GetoptError as err:
print(err) # will print something like "option -a not recognized"
usage()
exit(2)
directory = ""
meta = ""
use_binwalk = False
n = 10
use_existing = False
for o, a in opts:
if o in ("-d", "--directory"):
directory = a
elif o in ("-h", "--help"):
usage()
exit()
elif o in ("-m", "--metadata"):
meta = a
elif o in ("-b", "--binwalk"):
use_binwalk = True
elif o in ("-n", "--number"):
n = int(a)
elif o in ("-t", "--test"):
use_existing = True
if not directory:
print("Program must be provided a file directory path")
exit(1)
file_list = list_files(directory)
hash_list = []
if meta:
meta_contents = parse_metadata_json(meta)
else:
meta_contents = None
hash_list = [lsh_json(x) for x in zip(file_list, itertools.repeat(meta_contents))]
if use_existing:
file_data = np.load(".tmp.npz")
#See https://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez for why this syntax is needed
clustered_files = file_data['clusters'][()]
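    # (np.savez pickles a Python dict into a 0-d object ndarray; indexing the
    # loaded array with [()] unwraps it back into the original dict.)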
cluster_hashes = file_data['hash_list']
ms = joblib.load('.tmp2.pkl')
adj = np.zeros((len(hash_list), len(cluster_hashes)), int)
#Compare new file hashes against saved data to get distances
for i in range(len(hash_list)):
for j in range(len(cluster_hashes)):
adj[i][j] = diff_hash(hash_list[i], cluster_hashes[j]);
cluster_labels = ms.predict(adj)
for f in file_list:
        #Label of the predicted file cluster
lab = cluster_labels[file_list.index(f)]
if lab not in clustered_files:
print("{} does not belong to any existing cluster".format(f))
continue
clus = clustered_files[lab]
print("Target file {} is in cluster {}".format(f, lab))
for c in clus:
print(c)
#Empty line to separate cluster print outs
print()
exit()
else:
adj = np.zeros((len(hash_list), len(hash_list)), int)
for i in range(len(hash_list)):
for j in range(len(hash_list)):
d = diff_hash(hash_list[i], hash_list[j]);
adj[i][j] = d
adj[j][i] = d
best_cluster_count = 0
best_silhouette_score = -1.0
#Calculate the best cluster count in parallel
silhouette_list = Pool().map(cl, zip(range(2, 16), itertools.repeat(adj)))
best_cluster_count = silhouette_list.index(max(silhouette_list)) + 2
ms = MiniBatchKMeans(n_clusters=best_cluster_count)
cluster_labels = ms.fit_predict(adj)
clustered_files = {}
for f in file_list:
lab = cluster_labels[file_list.index(f)]
if lab in clustered_files:
clustered_files[lab].append(f)
else:
clustered_files[lab] = [f]
print(clustered_files)
np.savez(".tmp", clusters=clustered_files, hash_list=hash_list)
joblib.dump(ms, '.tmp2.pkl')
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
plt.figure(1)
plt.clf()
colors = itertools.cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(adj[my_members, 0], adj[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| 30.485333
| 141
| 0.571641
|
93ecc85140eb083700cb75cd12da34a931dcc1e5
| 3,132
|
py
|
Python
|
proyecto_opti.py
|
rafaelfrieri1/Optimization-Project
|
20db5200cd361d358e213310c6eb2997c893ff27
|
[
"MIT"
] | null | null | null |
proyecto_opti.py
|
rafaelfrieri1/Optimization-Project
|
20db5200cd361d358e213310c6eb2997c893ff27
|
[
"MIT"
] | null | null | null |
proyecto_opti.py
|
rafaelfrieri1/Optimization-Project
|
20db5200cd361d358e213310c6eb2997c893ff27
|
[
"MIT"
] | null | null | null |
from pyomo.environ import *
import numpy as np
cijr = []
fjr = []
with open("./Test_Instances/RAND2000_120-80.txt") as instanceFile:
n = int(instanceFile.readline())
clientsFacilitiesSizeStr = instanceFile.readline().strip().split(" ")
instanceFile.readline()
for phase in range(2):
for i in range(1, len(clientsFacilitiesSizeStr)):
if phase == 0:
fjr.append([])
for j in range(int(clientsFacilitiesSizeStr[i])):
fjr[i-1].append(float(instanceFile.readline().strip().split(" ")[0]))
else:
nextLine = instanceFile.readline().strip()
cijr.append([])
j = 0
while(nextLine != ""):
nextLineNumbers = nextLine.split(" ")
cijr[i-1].append([])
for nextLinenumber in nextLineNumbers:
cijr[i-1][j].append(float(nextLinenumber))
j+=1
nextLine = instanceFile.readline().strip()
instanceFile.readline()
instanceFile.close()
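# (Assumed layout of the instance file: the first line holds n, the second the
# per-level facility counts; phase 0 then reads each level's fixed costs (fjr)
# and phase 1 the blank-line-terminated transport cost matrices (cijr).)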
#cijr = np.array(
# [
# np.array([
# [1,1000,1000],
# [15000,2,15000],
# [20000,20000,3],
# [1,40000,40000]
# ]),
# np.array([
# [10000,5,10000,10000,20000],
# [30000,23000,3,24000,18000],
# [28000,35000,21000,4,33000]
# ]),
# np.array([
# [16, 14],
# [2, 18000],
# [20000, 3],
# [20000, 2],
# [24, 25]
# ])
# ]
#)
#fjr = np.array(
# [
# [10,15,20],
# [17,20,25,30,18],
# [48,50]
# ]
#)
I = range(len(cijr[0]))
J = range(len(cijr[0][0]))
R = range(1, len(fjr)+1)
RM1 = range(1, len(fjr))
yjrIndexes = []
zirabIndexes = []
for r in R:
for j in range(len(fjr[r-1])):
yjrIndexes.append((r,j))
for i in I:
for r in RM1:
for a in range(len(cijr[r])):
for b in range(len(cijr[r][0])):
zirabIndexes.append((i,r,a,b))
model = ConcreteModel()
model.vij1 = Var(I, J, domain=Binary)
model.yjr = Var(yjrIndexes, domain=Binary)
model.zirab = Var(zirabIndexes, domain=Binary)
model.constraints = ConstraintList()
for i in I:
model.constraints.add(sum(model.vij1[i, j] for j in J) == 1)
for j1 in J:
model.constraints.add(sum(model.zirab[i,1,j1,b] for b in range(len(cijr[1][0]))) == model.vij1[i,j1])
model.constraints.add(model.vij1[i,j1] <= model.yjr[1,j1])
for r in range(2, len(fjr) + 1):
if(r <= len(fjr) - 1):
for a in range(len(cijr[r])):
model.constraints.add(sum(model.zirab[i,r,a,b] for b in range(len(cijr[r][0]))) == sum(model.zirab[i,r-1,bp,a] for bp in range(len(cijr[r-1]))))
for b in range(len(cijr[r-1][0])):
model.constraints.add(sum(model.zirab[i,r-1,a,b,] for a in range(len(cijr[r-1]))) <= model.yjr[r, b])
model.objective = Objective(
expr = sum(sum(cijr[0][i][j1]*model.vij1[i,j1] for j1 in J) for i in I) + sum(sum(sum(sum(cijr[r][a][b]*model.zirab[i,r,a,b] for b in range(len(cijr[r][0])))for a in range(len(cijr[r]))) for r in RM1) for i in I) + sum(sum(fjr[r-1][j]*model.yjr[r,j] for j in range(len(fjr[r-1]))) for r in R),
sense=minimize
)
results = SolverFactory('cplex').solve(model)
results.write()
#if results.solver.status:
# model.pprint()
#model.constraints.display()
| 28.472727
| 295
| 0.590358
|
93ee69a5215c2d7c2cad4e30beebcfeebc327ae5
| 367
|
py
|
Python
|
import_gs.py
|
christophdrayss/labelImg-pointer-upgrade
|
9304d2c347abb935543579e14554aa74ec97807c
|
[
"MIT"
] | null | null | null |
import_gs.py
|
christophdrayss/labelImg-pointer-upgrade
|
9304d2c347abb935543579e14554aa74ec97807c
|
[
"MIT"
] | null | null | null |
import_gs.py
|
christophdrayss/labelImg-pointer-upgrade
|
9304d2c347abb935543579e14554aa74ec97807c
|
[
"MIT"
] | null | null | null |
#
# copies a specific folder from the google cloud into the local import directory
#
# Syntax:
# python3 import.py <GS URI>
#
# Example:
# python3 import.py gs://bini-products-bucket/training-sets-2/intrusion/poc1_v1.2.0/frames
#
#
import os
import sys
# uri
cloud_uri = sys.argv[1]
os.system('mkdir -p import')
os.system('gsutil -m cp -c -r '+cloud_uri+' ./import/')
| 18.35
| 90
| 0.692098
|
93ef5cd7a8eaabe0926f1a74eb45c72effabde13
| 482,361
|
py
|
Python
|
pynos/versions/ver_7/ver_7_1_0/yang/brocade_xstp_ext.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 12
|
2015-09-21T23:56:09.000Z
|
2018-03-30T04:35:32.000Z
|
pynos/versions/ver_7/ver_7_1_0/yang/brocade_xstp_ext.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 10
|
2016-09-15T19:03:27.000Z
|
2017-07-17T23:38:01.000Z
|
pynos/versions/ver_7/ver_7_1_0/yang/brocade_xstp_ext.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 6
|
2015-08-14T08:05:23.000Z
|
2022-02-03T15:33:54.000Z
|
#!/usr/bin/env python
import xml.etree.ElementTree as ET
| 47.744333
| 133
| 0.671974
|
93f0a5f958369eb4430a00c36a168b1783fda002
| 735
|
py
|
Python
|
portal/middleware.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 43
|
2020-07-31T14:38:06.000Z
|
2022-03-07T11:28:28.000Z
|
portal/middleware.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 322
|
2020-07-23T19:38:26.000Z
|
2022-03-31T19:15:45.000Z
|
portal/middleware.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 6
|
2020-11-28T19:30:20.000Z
|
2021-07-29T18:06:55.000Z
|
import pytz
from django.conf import settings
| 27.222222
| 75
| 0.642177
|
93f2524a7c6f2e836f91bdc023c3abf9b271eb96
| 56
|
py
|
Python
|
mikaponics/foundation/__init__.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 2
|
2019-04-30T23:51:41.000Z
|
2019-05-04T00:35:52.000Z
|
mikaponics/foundation/__init__.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 27
|
2019-04-30T20:22:28.000Z
|
2022-02-10T08:10:32.000Z
|
mikaponics/foundation/__init__.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 1
|
2019-03-08T18:24:23.000Z
|
2019-03-08T18:24:23.000Z
|
default_app_config = 'foundation.apps.FoundationConfig'
| 28
| 55
| 0.857143
|
93f3149ab4cf735ff8855c62f4a02835c7b351e6
| 483
|
py
|
Python
|
app/main.py
|
cesko-digital/newschatbot
|
4f47d7902433bff09b48fcebcf9ee8422eb0ec7e
|
[
"MIT"
] | 1
|
2021-04-06T16:52:36.000Z
|
2021-04-06T16:52:36.000Z
|
app/main.py
|
cesko-digital/newschatbot
|
4f47d7902433bff09b48fcebcf9ee8422eb0ec7e
|
[
"MIT"
] | 17
|
2021-05-30T17:06:48.000Z
|
2021-09-26T08:20:02.000Z
|
app/main.py
|
cesko-digital/newschatbot
|
4f47d7902433bff09b48fcebcf9ee8422eb0ec7e
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_migrate import Migrate
from app.model import db
from app.controller import api
app = Flask(__name__)
app.register_blueprint(api)
app.config[
"SQLALCHEMY_DATABASE_URI"
] = "postgresql://newschatbotdevelopment:Wlk8skrHKvZEbM6Gw@database.internal.newschatbot.ceskodigital.net:5432/newschatbotdevelopment"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
migrate = Migrate(app, db)
db.init_app(app)
if __name__ == "__main__":
app.run()
| 25.421053
| 134
| 0.797101
|
93f37962ca5f7dd8a9101563b5c77758b61305bc
| 67
|
py
|
Python
|
api/vehicles/test_cases/__init__.py
|
jaconsta/soat_cnpx
|
3c5a05a334e44158b0aa9ab57c309771953b2950
|
[
"MIT"
] | null | null | null |
api/vehicles/test_cases/__init__.py
|
jaconsta/soat_cnpx
|
3c5a05a334e44158b0aa9ab57c309771953b2950
|
[
"MIT"
] | null | null | null |
api/vehicles/test_cases/__init__.py
|
jaconsta/soat_cnpx
|
3c5a05a334e44158b0aa9ab57c309771953b2950
|
[
"MIT"
] | null | null | null |
from .test_vehicles_crud import *
from .test_soat_vehicles import *
| 33.5
| 33
| 0.835821
|
93f51b485662f69b94bcc6b67cecfbb6633cdc40
| 2,887
|
py
|
Python
|
examples/siha/sleep_intraday_dataset.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 20
|
2021-12-06T10:41:54.000Z
|
2022-03-13T16:25:43.000Z
|
examples/siha/sleep_intraday_dataset.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 33
|
2021-12-06T08:27:18.000Z
|
2022-03-14T05:07:53.000Z
|
examples/siha/sleep_intraday_dataset.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 2
|
2022-02-07T08:06:48.000Z
|
2022-02-14T07:13:42.000Z
|
"""Example on how to read sleep data from SIHA
"""
import os
from tasrif.data_readers.siha_dataset import SihaDataset
from tasrif.processing_pipeline import SequenceOperator
from tasrif.processing_pipeline.custom import JqOperator
from tasrif.processing_pipeline.pandas import (
ConvertToDatetimeOperator,
JsonNormalizeOperator,
SetIndexOperator,
)
siha_folder_path = os.environ.get("SIHA_PATH")
pipeline = SequenceOperator(
[
SihaDataset(siha_folder_path, table_name="Data"),
JqOperator(
"map({patientID} + (.data.sleep[].data as $data | "
+ "($data.sleep | map(.) | .[]) | . * {levels: {overview : ($data.summary//{})}})) | "
+ "map (if .levels.data != null then . else .levels += {data: []} end) | "
+ "map(. + {type, dateOfSleep, minutesAsleep, logId, startTime, endTime, duration, isMainSleep,"
+ " minutesToFallAsleep, minutesAwake, minutesAfterWakeup, timeInBed, efficiency, infoCode})"
),
JsonNormalizeOperator(
record_path=["levels", "data"],
meta=[
"patientID",
"logId",
"dateOfSleep",
"startTime",
"endTime",
"duration",
"isMainSleep",
"minutesToFallAsleep",
"minutesAsleep",
"minutesAwake",
"minutesAfterWakeup",
"timeInBed",
"efficiency",
"type",
"infoCode",
["levels", "summary", "deep", "count"],
["levels", "summary", "deep", "minutes"],
["levels", "summary", "deep", "thirtyDayAvgMinutes"],
["levels", "summary", "wake", "count"],
["levels", "summary", "wake", "minutes"],
["levels", "summary", "wake", "thirtyDayAvgMinutes"],
["levels", "summary", "light", "count"],
["levels", "summary", "light", "minutes"],
["levels", "summary", "light", "thirtyDayAvgMinutes"],
["levels", "summary", "rem", "count"],
["levels", "summary", "rem", "minutes"],
["levels", "summary", "rem", "thirtyDayAvgMinutes"],
["levels", "overview", "totalTimeInBed"],
["levels", "overview", "totalMinutesAsleep"],
["levels", "overview", "stages", "rem"],
["levels", "overview", "stages", "deep"],
["levels", "overview", "stages", "light"],
["levels", "overview", "stages", "wake"],
],
errors="ignore",
),
ConvertToDatetimeOperator(
feature_names=["dateTime"], infer_datetime_format=True
),
SetIndexOperator("dateTime"),
]
)
df = pipeline.process()
print(df)
| 38.493333
| 108
| 0.512643
|
93f625804e80ba67e77913d8e04521c5e668b99a
| 6,169
|
py
|
Python
|
tests/test_contract.py
|
MioYvo/vision-python-sdk
|
53608232c333f57f83a8b16ba92f50e90f28ac88
|
[
"MIT"
] | null | null | null |
tests/test_contract.py
|
MioYvo/vision-python-sdk
|
53608232c333f57f83a8b16ba92f50e90f28ac88
|
[
"MIT"
] | null | null | null |
tests/test_contract.py
|
MioYvo/vision-python-sdk
|
53608232c333f57f83a8b16ba92f50e90f28ac88
|
[
"MIT"
] | null | null | null |
import pytest
from visionpy import Vision, Contract
from visionpy import AsyncVision, AsyncContract
from visionpy.keys import PrivateKey
# vpioneer addr and key
PRIVATE_KEY = PrivateKey(bytes.fromhex("a318cb4f1f3b87d604163e4a854312555d57158d78aef26797482d3038c4018b"))
FROM_ADDR = 'VSfD1o6FPChqdqLgwJaztjckyyo2GSM1KP'
TO_ADDR = 'VTCYvEK2ZuWvZ5LXqrLpU2GoRkFeJ1NrD2' # private_key: eed06aebdef88683ff5678b353d1281bb2b730113c9283f7ea96600a0d2c104f
VRC20_CONTRACT_ADDR = 'VE2sE7iXbSyESQKMPLav5Q84EXEHZCnaRX'
| 36.288235
| 481
| 0.70449
|
93f6815c90e0d2c5d837cca53aee27c9bd4b93f4
| 261
|
py
|
Python
|
store/cach.py
|
fj-fj-fj/drf-api-orm
|
c3fdd9ca31dafe40b8b1b88b5ee9dbeb4880a92a
|
[
"MIT"
] | null | null | null |
store/cach.py
|
fj-fj-fj/drf-api-orm
|
c3fdd9ca31dafe40b8b1b88b5ee9dbeb4880a92a
|
[
"MIT"
] | 2
|
2021-03-20T10:18:45.000Z
|
2021-04-05T19:45:58.000Z
|
store/cach.py
|
fj-fj-fj/drf-api-orm
|
c3fdd9ca31dafe40b8b1b88b5ee9dbeb4880a92a
|
[
"MIT"
] | 1
|
2021-03-19T20:51:17.000Z
|
2021-03-19T20:51:17.000Z
|
from django.db.models import Avg
from store.models import Book, UserBookRelation
| 26.1
| 99
| 0.731801
|
93f841a8146cf689db89cb783c3060d1cae831aa
| 555
|
py
|
Python
|
webware/PSP/PSPPage.py
|
PeaceWorksTechnologySolutions/w4py3
|
7f9e7088034e3e3ac53158edfa4f377b5b2f45fe
|
[
"MIT"
] | 11
|
2020-10-18T07:33:56.000Z
|
2021-09-27T21:03:38.000Z
|
webware/PSP/PSPPage.py
|
PeaceWorksTechnologySolutions/w4py3
|
7f9e7088034e3e3ac53158edfa4f377b5b2f45fe
|
[
"MIT"
] | 9
|
2020-01-03T18:58:25.000Z
|
2020-01-09T18:36:23.000Z
|
webware/PSP/PSPPage.py
|
PeaceWorksTechnologySolutions/w4py3
|
7f9e7088034e3e3ac53158edfa4f377b5b2f45fe
|
[
"MIT"
] | 4
|
2020-06-30T09:41:56.000Z
|
2021-02-20T13:48:08.000Z
|
"""Default base class for PSP pages.
This class is intended to be used in the future as the default base class
for PSP pages in the event that some special processing is needed.
Right now, no special processing is needed, so the default base class
for PSP pages is the standard Webware Page.
"""
from Page import Page
| 26.428571
| 73
| 0.717117
|
93f8df4d6dce3b4bf4d20eae42a3c300c4be5bdc
| 139,378
|
py
|
Python
|
rcsb/utils/dictionary/DictMethodEntityInstanceHelper.py
|
rcsb/py-rcsb_utils_dictionary
|
459f759ed7bc267ef63f57b230974afe555d9157
|
[
"Apache-2.0"
] | 2
|
2022-01-22T16:23:44.000Z
|
2022-01-22T20:28:34.000Z
|
rcsb/utils/dictionary/DictMethodEntityInstanceHelper.py
|
rcsb/py-rcsb_utils_dictionary
|
459f759ed7bc267ef63f57b230974afe555d9157
|
[
"Apache-2.0"
] | 4
|
2021-11-23T15:27:49.000Z
|
2022-03-30T19:51:43.000Z
|
rcsb/utils/dictionary/DictMethodEntityInstanceHelper.py
|
rcsb/py-rcsb_utils_dictionary
|
459f759ed7bc267ef63f57b230974afe555d9157
|
[
"Apache-2.0"
] | 2
|
2022-01-22T16:23:46.000Z
|
2022-03-27T18:01:42.000Z
|
##
# File: DictMethodEntityInstanceHelper.py
# Author: J. Westbrook
# Date: 16-Jul-2019
# Version: 0.001 Initial version
#
#
# Updates:
# 22-Nov-2021 dwp authSeqBeg and authSeqEnd are returned as integers but must be compared as strings in pAuthAsymD
#
##
"""
This helper class implements methods supporting entity-instance-level functions in the RCSB dictionary extension.
"""
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
# pylint: disable=too-many-lines
import logging
import re
import time
from collections import OrderedDict
from mmcif.api.DataCategory import DataCategory
from rcsb.utils.dictionary.DictMethodSecStructUtils import DictMethodSecStructUtils
logger = logging.getLogger(__name__)
| 56.474068
| 184
| 0.543536
|
93f8e49e11b7653fd863536bebeb07d2b758a06e
| 12,788
|
py
|
Python
|
tests/data/long_statement_strings.py
|
aalto-speech/fi-parliament-tools
|
c40ab81a23c661765c380238cbf10acf733d94d4
|
[
"MIT"
] | 5
|
2021-05-19T22:56:40.000Z
|
2022-03-29T15:25:03.000Z
|
tests/data/long_statement_strings.py
|
aalto-speech/fi-parliament-tools
|
c40ab81a23c661765c380238cbf10acf733d94d4
|
[
"MIT"
] | 32
|
2021-05-10T07:58:57.000Z
|
2022-03-01T08:02:11.000Z
|
tests/data/long_statement_strings.py
|
aalto-speech/fi-parliament-tools
|
c40ab81a23c661765c380238cbf10acf733d94d4
|
[
"MIT"
] | null | null | null |
"""Long statement strings and other space consuming data definitions for testing are declared here.
This is done to avoid clutter in main test files.
"""
from typing import Dict
from typing import List
from typing import Tuple
import pytest
from _pytest.fixtures import SubRequest
from fi_parliament_tools.parsing.data_structures import MP
chairman_texts = [
"Ilmoitetaan, ett valiokuntien ja kansliatoimikunnan vaalit toimitetaan ensi tiistaina 5. "
"pivn toukokuuta kello 14 pidettvss tysistunnossa. Ehdokaslistat nit vaaleja varten "
"on jtettv keskuskansliaan viimeistn ensi maanantaina 4. pivn toukokuuta kello 12.",
"Toimi Kankaanniemen ehdotus 5 ja Krista Kiurun ehdotus 6 koskevat samaa asiaa, joten ensin "
"nestetn Krista Kiurun ehdotuksesta 6 Toimi Kankaanniemen ehdotusta 5 vastaan ja sen "
"jlkeen voittaneesta mietint vastaan.",
"Kuhmosta oleva agrologi Tuomas Kettunen, joka varamiehen Oulun vaalipiirist on "
"tullut Antti Rantakankaan sijaan, on tnn 28.11.2019 esittnyt puhemiehelle "
"edustajavaltakirjansa ja ryhtynyt hoitamaan edustajantointaan.",
]
speaker_texts = [
"Arvoisa puhemies! Hallituksen esityksen mukaisesti on varmasti hyv jatkaa mraikaisesti "
"matkapuhelinliittymien telemarkkinointikieltoa. Kukaan kansalainen ei ole kyll ainakaan "
"itselleni valittanut siit, ett en eivt puhelinkauppiaat soittele kotiliittymiin ja "
"puhelimiin, ja mys operaattorit ovat olleet kohtuullisen tyytyvisi thn kieltoon. "
"Ongelmia on kuitenkin muussa puhelinmyynniss ja telemarkkinoinnissa. Erityisesti "
"nettiliittymien puhelinmyynniss on ongelmia. On aggressiivista myynti, ja ihmisill on "
"eptietoisuutta siit, mit he ovat lopulta ostaneet. Lisksi mielestni on ongelmallista "
"rajata vain puhelinliittymt telemarkkinointikiellon piiriin, kun viestint- ja "
"mobiilipalveluiden puhelinkauppa on laajempi aihe ja se on laajempi ongelma ja ongelmia on "
"tosiaan tss muidenkin tyyppisten sopimusten myynniss. Tm laki tmnsisltisen on "
"varmasti ihan hyv, ja on hyv mraikaisesti jatkaa tt, mutta nkisin, ett sitten kun "
"tm laki on kulumassa umpeen, meidn on palattava asiaan ja on tehtv joku lopullisempi "
"ratkaisu tst telemarkkinoinnista. Ei voida menn tllaisen yhden sopimusalan "
"mraikaisuudella eteenpin. Meidn tytyy tehd ratkaisut, jotka ovat laajempia ja jotka "
"koskevat viestint-, tele- ja mobiilisopimusten puhelinmyynti laajemmin ja muutenkin "
"puhelinmyynnin pelisntj laajemmin. Varmaankin paras ratkaisu olisi se, ett jatkossa "
"puhelimessa tehty ostos pitisi varmentaa kirjallisesti esimerkiksi shkpostilla, "
"tekstiviestill tai kirjeell. Meidn on ratkaistava jossain vaiheessa nm puhelinmyynniss "
"olevat ongelmat ja ksiteltv asia kokonaisvaltaisesti. Kiitos. (Hlin)",
"Arvoisa puhemies! Pienen, vastasyntyneen lapsen ensimminen ote on samaan aikaan luja ja "
"hento. Siihen otteeseen kiteytyy paljon luottamusta ja vastuuta. Luottamusta siihen, ett "
"molemmat vanhemmat ovat lsn lapsen elmss. Vastuuta siit, ett huominen on aina "
"valoisampi. Luottamus ja vastuu velvoittavat mys meit pttji. Tmn hallituksen "
"ptkset eivt perheiden kannalta ole olleet kovin hppisi. Paljon on leikattu perheiden "
"arjesta, mutta toivon kipin hersi viime vuonna, kun hallitus ilmoitti, ett se toteuttaa "
"perhevapaauudistuksen. Viime perjantaina hallituksen perheministeri kuitenkin ylltten "
"ilmoitti, ett hn keskeytt tmn uudistuksen. Viel suurempi hmmstys oli se syy, jonka "
"takia tm keskeytettiin. Ministeri ilmoitti, ett valmistellut mallit olisivat olleet "
"huonoja suomalaisille perheille. Perheministeri Saarikko, kun te olette vastuussa tmn "
"uudistuksen valmistelusta, niin varmasti suomalaisia perheit kiinnostaisi tiet, miksi te "
"valmistelitte huonoja malleja.",
"Arvoisa puhemies! Lmpimt osanotot omasta ja perussuomalaisten eduskuntaryhmn "
"puolesta pitkaikaisen kansanedustajan Maarit Feldt-Rannan omaisille ja lheisille. "
"Nuorten mielenterveysongelmat ovat vakava yhteiskunnallinen ongelma. "
"Mielenterveysongelmat ovat kasvaneet viime vuosina rjhdysmisesti, mutta "
"terveydenhuoltoon ei ole listty vastaavasti resursseja, vaan hoitoonpsy on "
"ruuhkautunut. Masennuksesta krsii jopa 15 prosenttia nuorista, ahdistuneisuudesta 10 "
"prosenttia, ja 1015 prosentilla on toistuvia itsetuhoisia ajatuksia. Monet nist "
"ongelmista olisivat hoidettavissa, jos yhteiskunta ottaisi asian vakavasti. Turhan "
"usein hoitoon ei kuitenkaan pse, vaan nuoret jtetn heitteille. Kysyn: mihin "
"toimiin hallitus ryhtyy varmistaakseen, ett mielenterveysongelmista krsiville "
"nuorille on tarjolla heidn tarvitsemansa hoito silloin kun he sit tarvitsevat?",
]
speaker_lists = [
[
(1301, "Jani", "Mkel", "ps", ""),
(1108, "Juha", "Sipil", "", "Pministeri"),
(1301, "Jani", "Mkel", "ps", ""),
(1108, "Juha", "Sipil", "", "Pministeri"),
(1141, "Peter", "stman", "kd", ""),
(947, "Petteri", "Orpo", "", "Valtiovarainministeri"),
(1126, "Tytti", "Tuppurainen", "sd", ""),
(1108, "Juha", "Sipil", "", "Pministeri"),
(1317, "Simon", "Elo", "sin", ""),
(1108, "Juha", "Sipil", "", "Pministeri"),
],
[
(1093, "Juho", "Eerola", "ps", ""),
(1339, "Kari", "Kulmala", "sin", ""),
(887, "Sirpa", "Paatero", "sd", ""),
(967, "Timo", "Heinonen", "kok", ""),
],
[
(971, "Johanna", "Ojala-Niemel", "sd", ""),
(1129, "Arja", "Juvonen", "ps", ""),
(1388, "Mari", "Rantanen", "ps", ""),
(1391, "Ari", "Koponen", "ps", ""),
(1325, "Sari", "Tanus", "kd", ""),
(971, "Johanna", "Ojala-Niemel", "sd", ""),
],
]
chairman_statements = [
{
"type": "C",
"mp_id": 0,
"firstname": "Mauri",
"lastname": "Pekkarinen",
"party": "",
"title": "Ensimminen varapuhemies",
"start_time": "",
"end_time": "",
"language": "",
"text": "Ainoaan ksittelyyn esitelln pivjrjestyksen 4. asia. Ksittelyn pohjana on "
"talousvaliokunnan mietint TaVM 18/2016 vp.",
"offset": -1.0,
"duration": -1.0,
"embedded_statement": {
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
},
{
"type": "C",
"mp_id": 0,
"firstname": "Mauri",
"lastname": "Pekkarinen",
"party": "",
"title": "Ensimminen varapuhemies",
"start_time": "",
"end_time": "",
"language": "",
"text": "Toiseen ksittelyyn esitelln pivjrjestyksen 3. asia. Keskustelu asiasta "
"pttyi 6.6.2017 pidetyss tysistunnossa. Keskustelussa on Anna Kontula Matti Semin "
"kannattamana tehnyt vastalauseen 2 mukaisen lausumaehdotuksen.",
"offset": -1.0,
"duration": -1.0,
"embedded_statement": {
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
},
{
"type": "C",
"mp_id": 0,
"firstname": "Tuula",
"lastname": "Haatainen",
"party": "",
"title": "Toinen varapuhemies",
"start_time": "",
"end_time": "",
"language": "",
"text": "Toiseen ksittelyyn esitelln pivjrjestyksen 6. asia. Nyt voidaan hyvksy "
"tai hylt lakiehdotukset, joiden sisllst ptettiin ensimmisess ksittelyss.",
"offset": -1.0,
"duration": -1.0,
"embedded_statement": {
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
},
]
embedded_statements = [
{
"mp_id": 0,
"title": "Puhemies",
"firstname": "Maria",
"lastname": "Lohela",
"language": "",
"text": "Edustaja Laukkanen, ja sitten puhujalistaan.",
"offset": -1.0,
"duration": -1.0,
},
{
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
{
"mp_id": 0,
"title": "Ensimminen varapuhemies",
"firstname": "Mauri",
"lastname": "Pekkarinen",
"language": "",
"text": "Tm valtiovarainministerin puheenvuoro saattaa antaa aihetta muutamaan "
"debattipuheenvuoroon. Pyydn niit edustajia, jotka haluavat kytt vastauspuheenvuoron, "
"nousemaan yls ja painamaan V-painiketta.",
"offset": -1.0,
"duration": -1.0,
},
{
"mp_id": 0,
"title": "Ensimminen varapuhemies",
"firstname": "Antti",
"lastname": "Rinne",
"language": "",
"text": "Meill on puoleenyhn vhn reilu kolme tuntia aikaa, ja valtioneuvoston pit "
"sit ennen soveltamisasetus saattaa voimaan. Pyydn ottamaan tmn huomioon "
"keskusteltaessa.",
"offset": -1.0,
"duration": -1.0,
},
]
mps = [
MP(
103,
"Matti",
"Ahde",
"o",
"fi",
1945,
"Sosialidemokraattinen eduskuntaryhm",
"",
"",
"Oulu",
"Oulun lnin vaalipiiri (03/1970-06/1990), Oulun vaalipiiri (03/2003-04/2011)",
"kansakoulu, ammattikoulu, kansankorkeakoulu",
),
MP(
1432,
"Marko",
"Kilpi",
"m",
"fi",
1969,
"Parliamentary Group of the National Coalition Party",
"police officer, writer",
"Kuopio",
"Rovaniemi",
"Electoral District of Savo-Karelia (04/2019-)",
"Degree in policing",
),
MP(
1374,
"Veronica",
"Rehn-Kivi",
"f",
"sv",
1956,
"Swedish Parliamentary Group",
"architect, building supervision manager",
"Kauniainen",
"Helsinki",
"Electoral District of Uusimaa (08/2016-)",
"architect",
),
MP(
1423,
"Iiris",
"Suomela",
"f",
"fi",
1994,
"Green Parliamentary Group",
"student of social sciences",
"Tampere",
"",
"Electoral District of Pirkanmaa (04/2019-)",
"",
),
]
| 37.722714
| 100
| 0.626759
|
93f98465a257a935b04f433473c8d2f49911d9f0
| 336
|
py
|
Python
|
atcoder/abc144/d.py
|
sugitanishi/competitive-programming
|
51af65fdce514ece12f8afbf142b809d63eefb5d
|
[
"MIT"
] | null | null | null |
atcoder/abc144/d.py
|
sugitanishi/competitive-programming
|
51af65fdce514ece12f8afbf142b809d63eefb5d
|
[
"MIT"
] | null | null | null |
atcoder/abc144/d.py
|
sugitanishi/competitive-programming
|
51af65fdce514ece12f8afbf142b809d63eefb5d
|
[
"MIT"
] | null | null | null |
import sys
import math
import itertools
from collections import deque
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
a,b,x=map(int,input().split())
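# AtCoder ABC144 D (Water Bottle): tilt an a x a x b tank holding x units of
# water as far as possible without spilling, and print the maximum tilt angle
# in degrees. If at least half the tank is full, the air gap forms a
# triangular prism at the top (angle = atan(c / a)); otherwise the water
# itself forms a triangular prism against one wall (angle = atan(b / c)).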
if (a**2*b)-x<=(a**2*b)/2:
c=2*((a**2*b)-x)/(a**2)
print(math.degrees(math.atan2(c,a)))
else:
c=2*x/b/a
print(math.degrees(math.atan2(b,c)))
| 22.4
| 44
| 0.657738
|
93faa5a4405de0a04d15d1ea4890ad9da6d219ed
| 12,644
|
py
|
Python
|
bakefont3/encode.py
|
golightlyb/bakefont3
|
b5e05f5f96dc37136cf1cf6053c081a7b30f9ea8
|
[
"MIT"
] | 7
|
2017-12-01T16:48:12.000Z
|
2021-01-21T13:05:24.000Z
|
bakefont3/encode.py
|
golightlyb/bakefont3
|
b5e05f5f96dc37136cf1cf6053c081a7b30f9ea8
|
[
"MIT"
] | 1
|
2021-04-19T01:03:37.000Z
|
2021-04-19T03:02:52.000Z
|
bakefont3/encode.py
|
golightlyb/bakefont3
|
b5e05f5f96dc37136cf1cf6053c081a7b30f9ea8
|
[
"MIT"
] | null | null | null |
import struct
import itertools
import freetype
ENDIAN = '<' # always little endian
# If you make a modified version, please change the URL in the string to
# let people know what generated the file!
# example: Bakefont 3.0.2 (compatible; Acme Inc version 1.3)
ENCODER = "Bakefont 3.0.2 (https://github.com/golightlyb/bakefont3)"
def fp26_6(native_num):
"""
    Encode a number in 26.6 fixed point arithmetic, with the lower 6 bits
    used for the fractional component and the upper 26 bits used for the
    integer component, returning a native int.
Freetype uses this encoding to represent floats.
"""
if isinstance(native_num, int):
result = native_num * 64
else:
result = int(float(native_num) * 64.0)
return int32(result)
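# e.g. fp26_6(1.5) yields int32(96), since 1.5 * 64 = 96: the low 6 bits
# (0b100000) carry the 0.5 fraction and the remaining bits the integer 1.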
def cstring(nativeString, encoding="utf-8"):
"""null-terminated C-style string"""
bytes = nativeString.encode(encoding);
return bytes + b'\0';
def b8string(nativeString, encoding="utf-8"):
"""uint8 length-prefixed Pascal-style string
plus a C-style null terminator
aka a 'bastard string'."""
bytes = nativeString.encode(encoding);
length = len(bytes)
assert length < 256
return uint8(length) + bytes + b'\0';
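# e.g. b8string("abc") should yield b'\x03abc\x00' (one-byte length prefix,
# UTF-8 payload, then the C-style NUL), assuming uint8() packs a single byte.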
| 34.358696
| 94
| 0.612148
|
93fcc6644a7bd3a91ddcfdaa15c6e3faf2dbec83
| 137
|
py
|
Python
|
tests/views/test_healthcheck.py
|
oliveryuen/python-flask
|
a53c9ed823fc2f63c416e5a3b47e91f5c9d91604
|
[
"Apache-2.0"
] | null | null | null |
tests/views/test_healthcheck.py
|
oliveryuen/python-flask
|
a53c9ed823fc2f63c416e5a3b47e91f5c9d91604
|
[
"Apache-2.0"
] | null | null | null |
tests/views/test_healthcheck.py
|
oliveryuen/python-flask
|
a53c9ed823fc2f63c416e5a3b47e91f5c9d91604
|
[
"Apache-2.0"
] | null | null | null |
"""Test health check"""
| 19.571429
| 41
| 0.70073
|
93fe622a14e935745be6617c3d7a3da20bbb3012
| 578
|
py
|
Python
|
venv/Lib/site-packages/fbs/_state.py
|
Acuf5928/check-
|
4b993e0bcee33434506565dab11ece3dfa9c5cab
|
[
"MIT"
] | 1
|
2020-03-30T00:08:41.000Z
|
2020-03-30T00:08:41.000Z
|
venv/Lib/site-packages/fbs/_state.py
|
Acuf5928/check-
|
4b993e0bcee33434506565dab11ece3dfa9c5cab
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/fbs/_state.py
|
Acuf5928/check-
|
4b993e0bcee33434506565dab11ece3dfa9c5cab
|
[
"MIT"
] | 2
|
2018-12-29T07:49:59.000Z
|
2020-03-18T02:44:31.000Z
|
"""
This INTERNAL module is used to manage fbs's global state. Having it here, in
one central place, allows fbs's test suite to manipulate the state to test
various scenarios.
"""
from collections import OrderedDict
SETTINGS = {}
LOADED_PROFILES = []
COMMANDS = OrderedDict()
| 27.52381
| 77
| 0.749135
|
93feb2b5aaee509b3ca59bd657fd9239d3cc9aa4
| 5,234
|
py
|
Python
|
rtk/dao/RTKMatrix.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | null | null | null |
rtk/dao/RTKMatrix.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | null | null | null |
rtk/dao/RTKMatrix.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | 2
|
2020-04-03T04:14:42.000Z
|
2021-02-22T05:30:35.000Z
|
# -*- coding: utf-8 -*-
#
# rtk.dao.RTKMatrix.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 Andrew Rowland andrew.rowland <AT> reliaqual <DOT> com
"""
===============================================================================
The RTKMatrix Table
===============================================================================
"""
# pylint: disable=E0401
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship # pylint: disable=E0401
# Import other RTK modules.
from Utilities import none_to_default # pylint: disable=E0401
from dao.RTKCommonDB import RTK_BASE # pylint: disable=E0401
| 40.261538
| 79
| 0.526175
|
93ff19152094c70f894a1b56b790e173ed1c2638
| 614
|
py
|
Python
|
tool/gitautopull.py
|
chaosannals/trial-python
|
740b91fa4b1b1b9839b7524515995a6d417612ca
|
[
"MIT"
] | null | null | null |
tool/gitautopull.py
|
chaosannals/trial-python
|
740b91fa4b1b1b9839b7524515995a6d417612ca
|
[
"MIT"
] | 8
|
2020-12-26T07:48:15.000Z
|
2022-03-12T00:25:14.000Z
|
tool/gitautopull.py
|
chaosannals/trial-python
|
740b91fa4b1b1b9839b7524515995a6d417612ca
|
[
"MIT"
] | null | null | null |
import os
import shutil
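# NOTE: the snippet omits pull_default(); a minimal sketch matching the
# script's name is assumed here: run "git pull" in every child repository.
def pull_default():
    for name in os.listdir('.'):
        if os.path.isdir(os.path.join(name, '.git')):
            os.system('git -C "{}" pull'.format(name))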
pull_default()
input('')
| 29.238095
| 65
| 0.583062
|
93ff6193a2b94edc476d54fd31667524f6fc80f3
| 815
|
py
|
Python
|
pyogp/apps/web/django/pyogp_webbot/urls.py
|
grobertson/PyOGP.Apps
|
03583baa8d3a2438b0d0a5452ee8c9e56aace9fd
|
[
"Apache-2.0"
] | null | null | null |
pyogp/apps/web/django/pyogp_webbot/urls.py
|
grobertson/PyOGP.Apps
|
03583baa8d3a2438b0d0a5452ee8c9e56aace9fd
|
[
"Apache-2.0"
] | null | null | null |
pyogp/apps/web/django/pyogp_webbot/urls.py
|
grobertson/PyOGP.Apps
|
03583baa8d3a2438b0d0a5452ee8c9e56aace9fd
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^pyogp_webbot/', include('pyogp_webbot.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
(r'^$', 'pyogp_webbot.login.views.index'),
(r'^pyogp_webbot/$', 'pyogp_webbot.login.views.index'),
(r'^pyogp_webbot/login/$', 'pyogp_webbot.login.views.login'),
(r'^pyogp_webbot/login/login_request/$', 'pyogp_webbot.login.views.login_request'),
)
| 37.045455
| 87
| 0.692025
|
93ffdac053f4b224bf9ac1f85bcc5aea184dd502
| 9,300
|
py
|
Python
|
emit.py
|
richardbenson91477/simile
|
aa1faa8902d24e57133cd2c9982e5d4eef6f913f
|
[
"Unlicense"
] | null | null | null |
emit.py
|
richardbenson91477/simile
|
aa1faa8902d24e57133cd2c9982e5d4eef6f913f
|
[
"Unlicense"
] | null | null | null |
emit.py
|
richardbenson91477/simile
|
aa1faa8902d24e57133cd2c9982e5d4eef6f913f
|
[
"Unlicense"
] | null | null | null |
''' code emitters '''
import out, enums as e
| 30.693069
| 78
| 0.475484
|
9e00237613a99687f5e6e25a05d24aa9f51580f2
| 2,291
|
py
|
Python
|
_static/cookbook/gravmag_euler_classic_expanding_window.py
|
fatiando/v0.1
|
1ab9876b247c67834b8e1c874d5b1d86f82802e2
|
[
"BSD-3-Clause"
] | null | null | null |
_static/cookbook/gravmag_euler_classic_expanding_window.py
|
fatiando/v0.1
|
1ab9876b247c67834b8e1c874d5b1d86f82802e2
|
[
"BSD-3-Clause"
] | null | null | null |
_static/cookbook/gravmag_euler_classic_expanding_window.py
|
fatiando/v0.1
|
1ab9876b247c67834b8e1c874d5b1d86f82802e2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
GravMag: Classic 3D Euler deconvolution of magnetic data using an
expanding window
"""
from fatiando import logger, mesher, gridder, utils, gravmag
from fatiando.vis import mpl, myv
log = logger.get()
log.info(logger.header())
# Make a model
bounds = [-5000, 5000, -5000, 5000, 0, 5000]
model = [
mesher.Prism(-1500, -500, -1500, -500, 1000, 2000, {'magnetization':2}),
mesher.Prism(500, 1500, 500, 2000, 1000, 2000, {'magnetization':2})]
# Generate some data from the model
shape = (100, 100)
area = bounds[0:4]
xp, yp, zp = gridder.regular(area, shape, z=-1)
# Add a constant baselevel
baselevel = 10
# Convert from nanoTesla to Tesla because euler and derivatives require things
# in SI
tf = (utils.nt2si(gravmag.prism.tf(xp, yp, zp, model, inc=-45, dec=0))
+ baselevel)
# Calculate the derivatives using FFT
xderiv = gravmag.fourier.derivx(xp, yp, tf, shape)
yderiv = gravmag.fourier.derivy(xp, yp, tf, shape)
zderiv = gravmag.fourier.derivz(xp, yp, tf, shape)
mpl.figure()
titles = ['Total field', 'x derivative', 'y derivative', 'z derivative']
for i, f in enumerate([tf, xderiv, yderiv, zderiv]):
mpl.subplot(2, 2, i + 1)
mpl.title(titles[i])
mpl.axis('scaled')
mpl.contourf(yp, xp, f, shape, 50)
mpl.colorbar()
mpl.m2km()
mpl.show()
# Pick the centers of the expanding windows
# The number of final solutions will be the number of points picked
mpl.figure()
mpl.suptitle('Pick the centers of the expanding windows')
mpl.axis('scaled')
mpl.contourf(yp, xp, tf, shape, 50)
mpl.colorbar()
centers = mpl.pick_points(area, mpl.gca(), xy2ne=True)
# Run the euler deconvolution on an expanding window
# Structural index is 3
index = 3
results = []
for center in centers:
results.append(
gravmag.euler.expanding_window(xp, yp, zp, tf, xderiv, yderiv, zderiv,
index, gravmag.euler.classic, center, 500, 5000))
print "Base level used: %g" % (baselevel)
print "Estimated base level: %g" % (results[-1]['baselevel'])
print "Estimated source location: %s" % (str(results[-1]['point']))
myv.figure()
myv.points([r['point'] for r in results], size=300.)
myv.prisms(model, opacity=0.5)
axes = myv.axes(myv.outline(bounds), ranges=[b*0.001 for b in bounds])
myv.wall_bottom(bounds)
myv.wall_north(bounds)
myv.show()
| 32.728571
| 78
| 0.690965
|
9e00350a4a2fb7dd0ecbdf440b5912df33e77fb3
| 324
|
py
|
Python
|
popmemes/backend/popmemes/migrations/0002_auto_20190122_0939.py
|
hangulu/twitter
|
e4461652d35fc6d547e93f364b56c1a1637c5547
|
[
"MIT"
] | null | null | null |
popmemes/backend/popmemes/migrations/0002_auto_20190122_0939.py
|
hangulu/twitter
|
e4461652d35fc6d547e93f364b56c1a1637c5547
|
[
"MIT"
] | 7
|
2019-12-29T08:23:25.000Z
|
2022-02-26T14:04:51.000Z
|
popmemes/backend/popmemes/migrations/0002_auto_20190122_0939.py
|
hangulu/twitter
|
e4461652d35fc6d547e93f364b56c1a1637c5547
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2019-01-22 09:39
from django.db import migrations
| 18
| 47
| 0.58642
|
9e06be00254eeed4fa569ee854389c69ab61f743
| 6,434
|
py
|
Python
|
tests/ampligraph/datasets/test_datasets.py
|
ojasviyadav/AmpliGraph
|
07ce70ff9e30812ac8f4a34d245d1d5decec27f7
|
[
"Apache-2.0"
] | null | null | null |
tests/ampligraph/datasets/test_datasets.py
|
ojasviyadav/AmpliGraph
|
07ce70ff9e30812ac8f4a34d245d1d5decec27f7
|
[
"Apache-2.0"
] | null | null | null |
tests/ampligraph/datasets/test_datasets.py
|
ojasviyadav/AmpliGraph
|
07ce70ff9e30812ac8f4a34d245d1d5decec27f7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The AmpliGraph Authors. All Rights Reserved.
#
# This file is Licensed under the Apache License, Version 2.0.
# A copy of the Licence is available in LICENCE, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
from ampligraph.datasets import load_wn18, load_fb15k, load_fb15k_237, load_yago3_10, load_wn18rr, load_wn11, load_fb13
from ampligraph.datasets.datasets import _clean_data
import numpy as np
| 42.328947
| 119
| 0.615636
|
9e08eda6cab019bc0097ad8470c08bcc09a74c92
| 5,959
|
py
|
Python
|
code/geometry/sector.py
|
Prometheus3375/inno-thesis
|
72245706fa25b49f333e08d6074d421b5becfdb5
|
[
"BSD-3-Clause"
] | null | null | null |
code/geometry/sector.py
|
Prometheus3375/inno-thesis
|
72245706fa25b49f333e08d6074d421b5becfdb5
|
[
"BSD-3-Clause"
] | null | null | null |
code/geometry/sector.py
|
Prometheus3375/inno-thesis
|
72245706fa25b49f333e08d6074d421b5becfdb5
|
[
"BSD-3-Clause"
] | null | null | null |
from io import StringIO
from math import atan2, ceil
from typing import Literal, Union, overload
from common import PI, Real, TWOPI, deg, real, reduce_angle
from functions import qbezeir_svg_given_middle
from .circle import CircleBase, FixedCircle
from .point import PointBase, Polar
class FixedSector(SectorBase):
__slots__ = '_hash',
class MutableSector(SectorBase):
__slots__ = ()
# TODO: add circle changing
def fix(self, /):
return FixedSector(self.circle, self.arc, self.start_arm)
def unfix(self, /):
return self
def rotate(self, angle: Real, /):
"""
Rotates the sector by the given angle clockwise
"""
self.start_arm -= angle
def Sector(circle: CircleBase, arc: Real, start_arm: Real = PI, /, *, fix: bool = True) -> SectorBase:
check_arc(arc)
arc = float(arc)
start_arm = float(start_arm)
if fix:
return FixedSector(circle.fix(), arc, start_arm)
return MutableSector(circle.fix(), arc, start_arm)
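# Illustrative use of the factory above (a CircleBase instance is assumed):
#   quarter = Sector(circle, PI / 2)             # immutable 90-degree sector
#   spinner = Sector(circle, PI / 2, fix=False)  # mutable variant
#   spinner.rotate(PI / 6)                       # shifts start_arm clockwise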
| 26.721973
| 112
| 0.587515
|
9e097d9b8021dd21364c747eb7b2f08352fe9ba6
| 1,155
|
py
|
Python
|
scripts/gen_mh.py
|
bplank/DaNplus
|
220428109c9ae5abc60e8968a7fe7a4aa6ad92e3
|
[
"MIT"
] | 5
|
2020-12-11T17:11:03.000Z
|
2022-01-01T12:14:04.000Z
|
scripts/gen_mh.py
|
bplank/DaNplus
|
220428109c9ae5abc60e8968a7fe7a4aa6ad92e3
|
[
"MIT"
] | null | null | null |
scripts/gen_mh.py
|
bplank/DaNplus
|
220428109c9ae5abc60e8968a7fe7a4aa6ad92e3
|
[
"MIT"
] | null | null | null |
import os
COL_SEPARATOR = "\t"
MULTI_SEPARATOR = "$"
for neFile in os.listdir('data/'):
neFile = 'data/' + neFile
out_filename = neFile.replace('.tsv', "_mh.tsv")
if os.path.isfile(out_filename) or '_mh' in neFile or os.path.isdir(neFile):
continue
out_f = open(out_filename, "w")
with open(neFile, "r") as in_f:
for line in in_f:
if len(line) > 2:
token_attrs = line.rstrip().split(COL_SEPARATOR)
if (token_attrs[1] == "O") and (token_attrs[2] == "O"):
new_label = "O"
elif (token_attrs[1] != "O") and (token_attrs[2] == "O"):
new_label = token_attrs[1]
elif (token_attrs[1] == "O") and (token_attrs[2] != "O"):
new_label = token_attrs[2]
else:
labels = [token_attrs[1], token_attrs[2]]
labels.sort()
new_label = labels[0] + MULTI_SEPARATOR + labels[1]
out_f.write(token_attrs[0] + COL_SEPARATOR + new_label + "\n")
else:
out_f.write(line)
out_f.close()
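# Worked examples of the merge rule above (labels illustrative):
#   col1="O",     col2="O"     -> "O"
#   col1="B-ORG", col2="O"     -> "B-ORG"
#   col1="B-PER", col2="B-LOC" -> "B-LOC$B-PER"  (sorted, joined with MULTI_SEPARATOR)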
| 38.5
| 80
| 0.507359
|
9e0bbeb93835b36e23fb310038a044e9818c4553
| 13,451
|
py
|
Python
|
kecpkg/commands/sign.py
|
jberends/kecpkg-tools
|
3c288c5b91b619fe76cd3622615f3ffe43509725
|
[
"Apache-2.0"
] | null | null | null |
kecpkg/commands/sign.py
|
jberends/kecpkg-tools
|
3c288c5b91b619fe76cd3622615f3ffe43509725
|
[
"Apache-2.0"
] | 7
|
2017-12-07T11:16:07.000Z
|
2019-12-11T15:25:07.000Z
|
kecpkg/commands/sign.py
|
KE-works/kecpkg-tools
|
3c288c5b91b619fe76cd3622615f3ffe43509725
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from pprint import pprint
import click
from pykechain.utils import temp_chdir
from kecpkg.commands.utils import CONTEXT_SETTINGS
from kecpkg.gpg import get_gpg, list_keys, hash_of_file
from kecpkg.settings import SETTINGS_FILENAME, GNUPG_KECPKG_HOME, load_settings, DEFAULT_SETTINGS, ARTIFACTS_FILENAME, \
ARTIFACTS_SIG_FILENAME
from kecpkg.utils import remove_path, echo_info, echo_success, echo_failure, get_package_dir, unzip_package
def verify_signature(package_dir, artifacts_filename, artifacts_sig_filename):
"""
Check signature of the package.
:param package_dir: directory fullpath of the package
:param artifacts_filename: path of the artifacts file
:param artifacts_sig_filename: path of the artifacts signature file
:return: None
"""
gpg = get_gpg()
artifacts_fp = os.path.join(package_dir, artifacts_filename)
artifacts_sig_fp = os.path.join(package_dir, artifacts_sig_filename)
if not os.path.exists(artifacts_fp):
echo_failure("Artifacts file does not exist: '{}'".format(artifacts_filename))
sys.exit(1)
if not os.path.exists(artifacts_sig_fp):
echo_failure("Artifacts signature file does not exist: '{}'. Is the package signed?".
format(artifacts_filename))
sys.exit(1)
with open(artifacts_sig_fp, 'rb') as sig_fd:
results = gpg.verify_file(sig_fd, data_filename=artifacts_fp)
if results.valid:
echo_info("Verified the signature and the signature is valid")
echo_info("Signed with: '{}'".format(results.username))
elif not results.valid:
echo_failure("Signature of the package is invalid")
echo_failure(pprint(results.__dict__))
sys.exit(1)
def verify_artifacts_hashes(package_dir, artifacts_filename):
"""
Check the hashes of the artifacts in the package.
:param package_dir: directory fullpath of the package
:param artifacts_filename: filename of the artifacts file
:return:
"""
artifacts_fp = os.path.join(package_dir, artifacts_filename)
if not os.path.exists(artifacts_fp):
echo_failure("Artifacts file does not exist: '{}'".format(artifacts_filename))
sys.exit(1)
with open(artifacts_fp, 'r') as fd:
artifacts = fd.readlines()
# process the file contents
# A line is "README.md,sha256=d831....ccf79a,336"
# ^filename ^algo ^hash ^size in bytes
fails = []
for af in artifacts:
# noinspection PyShadowingBuiltins,PyShadowingBuiltins
filename, hash, orig_size = af.split(',')
algorithm, orig_hash = hash.split('=')
fp = os.path.join(package_dir, filename)
if os.path.exists(fp):
found_hash = hash_of_file(fp, algorithm)
found_size = os.stat(fp).st_size
if found_hash != orig_hash.strip() or found_size != int(orig_size.strip()):
fails.append("File '{}' is changed in the package.".format(filename))
fails.append("File '{}' original checksum: '{}', found: '{}'".format(filename, orig_hash, found_hash))
fails.append("File '{}' original size: {}, found: {}".format(filename, orig_size, found_size))
else:
fails.append("File '{}' does not exist".format(filename))
if fails:
echo_failure('The package has been changed after building the package.')
for fail in fails:
print(fail)
sys.exit(1)
else:
        echo_info("Package contents successfully verified.")
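# Illustrative parse of one artifacts line (values taken from the format comment
# in verify_artifacts_hashes; the digest is elided there and stays elided here):
#   af = "README.md,sha256=d831....ccf79a,336"
#   filename, hash, orig_size = af.split(',')  # "README.md", "sha256=d831....ccf79a", "336"
#   algorithm, orig_hash = hash.split('=')     # "sha256", "d831....ccf79a"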
| 46.867596
| 120
| 0.63564
|
9e0cb81c07d1ab8a7ecca4e8e8464e12bdf7bef9
| 954
|
py
|
Python
|
benchmark/generate_data.py
|
etra0/sapeaob
|
3e21bd66f8530f983130c52e37d612cc53181acd
|
[
"MIT"
] | 3
|
2021-08-04T13:00:25.000Z
|
2021-12-21T21:07:40.000Z
|
benchmark/generate_data.py
|
etra0/sapeaob
|
3e21bd66f8530f983130c52e37d612cc53181acd
|
[
"MIT"
] | null | null | null |
benchmark/generate_data.py
|
etra0/sapeaob
|
3e21bd66f8530f983130c52e37d612cc53181acd
|
[
"MIT"
] | null | null | null |
import argparse
import random
SIGNATURE = b'\xDE\xAD\xBE\xEF\xC0\xFF\xEE\xAA\xBB\xCC\xDD\xEE\xFF\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xAA'
# Generate data in chunks of 1024 bytes.
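# Editorial sketch (the original main() was truncated out of this record):
# write `--size` random bytes in 1024-byte chunks and plant SIGNATURE at a
# random offset, which is what a signature-scanning benchmark needs.
# Argument names are illustrative.
def main():
    parser = argparse.ArgumentParser(description="Generate benchmark data")
    parser.add_argument("output")
    parser.add_argument("--size", type=int, default=1024 * 1024)
    args = parser.parse_args()
    offset = random.randrange(0, max(1, args.size - len(SIGNATURE)))
    with open(args.output, "wb") as f:
        written = 0
        while written < args.size:
            f.write(bytes(random.getrandbits(8) for _ in range(1024)))
            written += 1024
        f.seek(offset)
        f.write(SIGNATURE)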
if __name__ == "__main__":
main()
| 25.783784
| 111
| 0.669811
|
9e0dd95d1aaf80cae2655fcee6b6427ac437b94c
| 10,563
|
py
|
Python
|
doctor/lib/utils.py
|
freelawproject/doctor
|
3858b6f5de7903353f4376303329a986db5b7983
|
[
"BSD-2-Clause"
] | null | null | null |
doctor/lib/utils.py
|
freelawproject/doctor
|
3858b6f5de7903353f4376303329a986db5b7983
|
[
"BSD-2-Clause"
] | null | null | null |
doctor/lib/utils.py
|
freelawproject/doctor
|
3858b6f5de7903353f4376303329a986db5b7983
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
import io
import os
import re
import subprocess
import warnings
from collections import namedtuple
from decimal import Decimal
from pathlib import Path
import six
from PyPDF2 import PdfFileMerger
from reportlab.pdfgen import canvas
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, six.memoryview):
return bytes(s)
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b" ".join(
force_bytes(arg, encoding, strings_only, errors) for arg in s
)
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not issubclass(type(s), six.string_types):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, "__unicode__"):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DoctorUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = " ".join(force_text(arg, encoding, strings_only, errors) for arg in s)
return s
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
_PROTECTED_TYPES = six.integer_types + (
type(None),
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def make_png_thumbnail_for_instance(filepath, max_dimension):
"""Abstract function for making a thumbnail for a PDF
See helper functions below for how to use this in a simple way.
:param filepath: The attr where the PDF is located on the item
:param max_dimension: The longest you want any edge to be
:param response: Flask response object
"""
command = [
"pdftoppm",
"-singlefile",
"-f",
"1",
"-scale-to",
str(max_dimension),
filepath,
"-png",
]
p = subprocess.Popen(
command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return stdout, stderr.decode("utf-8"), str(p.returncode)
def make_png_thumbnails(filepath, max_dimension, pages, directory):
"""Abstract function for making a thumbnail for a PDF
See helper functions below for how to use this in a simple way.
:param filepath: The attr where the PDF is located on the item
:param max_dimension: The longest you want any edge to be
:param response: Flask response object
"""
for page in pages:
command = [
"pdftoppm",
"-singlefile",
"-f",
str(page),
"-scale-to",
str(max_dimension),
filepath,
"-png",
f"{directory.name}/thumb-{page}",
]
p = subprocess.Popen(
command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p.communicate()
def pdf_bytes_from_image_array(image_list, output_path) -> None:
"""Make a pdf given an array of Image files
:param image_list: List of images
:type image_list: list
:return: pdf_data
:type pdf_data: PDF as bytes
"""
image_list[0].save(
output_path,
"PDF",
resolution=100.0,
save_all=True,
append_images=image_list[1:],
)
del image_list
def strip_metadata_from_path(file_path):
"""Convert PDF file into PDF and remove metadata from it
Stripping the metadata allows us to hash the PDFs
:param pdf_bytes: PDF as binary content
:return: PDF bytes with metadata removed.
"""
with open(file_path, "rb") as f:
pdf_merger = PdfFileMerger()
pdf_merger.append(io.BytesIO(f.read()))
pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
byte_writer = io.BytesIO()
pdf_merger.write(byte_writer)
return force_bytes(byte_writer.getvalue())
def strip_metadata_from_bytes(pdf_bytes):
"""Convert PDF bytes into PDF and remove metadata from it
Stripping the metadata allows us to hash the PDFs
:param pdf_bytes: PDF as binary content
:return: PDF bytes with metadata removed.
"""
pdf_merger = PdfFileMerger()
pdf_merger.append(io.BytesIO(pdf_bytes))
pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
byte_writer = io.BytesIO()
pdf_merger.write(byte_writer)
return force_bytes(byte_writer.getvalue())
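# Why the dates are blanked above: /CreationDate and /ModDate are typically the
# only bytes that differ between otherwise identical PDFs, so hashing the
# stripped output gives a stable fingerprint. Minimal sketch (hashlib is
# standard library; pdf_bytes is hypothetical input):
#   import hashlib
#   fingerprint = hashlib.sha256(strip_metadata_from_bytes(pdf_bytes)).hexdigest()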
def cleanup_form(form):
"""Clean up a form object"""
os.remove(form.cleaned_data["fp"])
def pdf_has_images(path: str) -> bool:
"""Check raw PDF for embedded images.
We need to check if a PDF contains any images. If a PDF contains images it
likely has content that needs to be scanned.
:param path: Location of PDF to process.
:return: Does the PDF contain images?
:type: bool
"""
with open(path, "rb") as pdf_file:
pdf_bytes = pdf_file.read()
return True if re.search(rb"/Image ?/", pdf_bytes) else False
def ocr_needed(path: str, content: str) -> bool:
"""Check if OCR is needed on a PDF
Check if images are in PDF or content is empty.
:param path: The path to the PDF
:param content: The content extracted from the PDF.
:return: Whether OCR should be run on the document.
"""
if content.strip() == "" or pdf_has_images(path):
return True
return False
def make_page_with_text(page, data, h, w):
"""Make a page with text
:param page:
:param data:
:param h:
:param w:
:return:
"""
packet = io.BytesIO()
can = canvas.Canvas(packet, pagesize=(w, h))
# Set to a standard size and font for now.
can.setFont("Helvetica", 9)
# Make the text transparent
can.setFillAlpha(0)
for i in range(len(data["level"])):
try:
letter, (x, y, ww, hh), pg = (
data["text"][i],
(data["left"][i], data["top"][i], data["width"][i], data["height"][i]),
data["page_num"][i],
)
except:
continue
# Adjust the text to an 8.5 by 11 inch page
sub = ((11 * 72) / h) * int(hh)
x = ((8.5 * 72) / w) * int(x)
y = ((11 * 72) / h) * int(y)
yy = (11 * 72) - y
if int(page) == int(pg):
can.drawString(x, yy - sub, letter)
can.showPage()
can.save()
packet.seek(0)
return packet
| 30.528902
| 87
| 0.619142
|
9e0e1c62ee116428b55cffa380260139fb9ea5d8
| 906
|
py
|
Python
|
src/xrl/env_tester.py
|
k4ntz/XmodRL
|
dffb416bcd91010d8075ee1ac00cc4b9a3021967
|
[
"MIT"
] | null | null | null |
src/xrl/env_tester.py
|
k4ntz/XmodRL
|
dffb416bcd91010d8075ee1ac00cc4b9a3021967
|
[
"MIT"
] | null | null | null |
src/xrl/env_tester.py
|
k4ntz/XmodRL
|
dffb416bcd91010d8075ee1ac00cc4b9a3021967
|
[
"MIT"
] | 1
|
2021-11-10T18:09:27.000Z
|
2021-11-10T18:09:27.000Z
|
import gym
import numpy as np
import os
import random
import matplotlib.pyplot as plt
from atariari.benchmark.wrapper import AtariARIWrapper
# YarsRevenge
#
env_name = "DemonAttackDeterministic-v4"
env = AtariARIWrapper(gym.make(env_name))
name = env.unwrapped.spec.id
#ballgame = any(game in name for game in ["Pong", "Tennis"])
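# Editorial sketch (the original helper was truncated out of this record):
# AtariARIWrapper exposes RAM-derived state in info["labels"], so printing
# that dict is all the print_labels() call below needs.
def print_labels(info):
    for key, value in info.get("labels", {}).items():
        print(key, value)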
print(np.int16(3))
n_actions = env.action_space.n
_ = env.reset()
obs, _, done, info = env.step(0)
r = 0
for t in range(50000):
plt.imshow(env.render(mode='rgb_array'), interpolation='none')
plt.plot()
plt.pause(0.0001) # pause a bit so that plots are updated
action = random.randint(0, n_actions - 1)
obs, reward, done, info = env.step(action)
r += reward
print(reward)
print_labels(info)
if(done):
break
print(r)
| 22.65
| 66
| 0.695364
|
9e0ef102e2826e6b9febd80bed5d0193a3687555
| 2,711
|
py
|
Python
|
packages/lrn/model/question.py
|
genropy/learn
|
019286c1fa1548482f64ccbd91082e069ec62a56
|
[
"MIT"
] | 3
|
2019-11-16T12:38:20.000Z
|
2019-11-17T08:44:41.000Z
|
packages/lrn/model/question.py
|
genropy/learn
|
019286c1fa1548482f64ccbd91082e069ec62a56
|
[
"MIT"
] | null | null | null |
packages/lrn/model/question.py
|
genropy/learn
|
019286c1fa1548482f64ccbd91082e069ec62a56
|
[
"MIT"
] | 5
|
2019-11-16T16:22:10.000Z
|
2019-11-18T21:46:50.000Z
|
# encoding: utf-8
from datetime import datetime
| 54.22
| 138
| 0.570638
|
9e11d3f12bcf35bac083ace9a1b7490250555694
| 3,087
|
py
|
Python
|
core/api/OxfordAPI.py
|
vimarind/Complete-GRE-Vocab
|
6dc8bb8ed0506ed572edd1a01a456d9a27238c94
|
[
"MIT"
] | null | null | null |
core/api/OxfordAPI.py
|
vimarind/Complete-GRE-Vocab
|
6dc8bb8ed0506ed572edd1a01a456d9a27238c94
|
[
"MIT"
] | null | null | null |
core/api/OxfordAPI.py
|
vimarind/Complete-GRE-Vocab
|
6dc8bb8ed0506ed572edd1a01a456d9a27238c94
|
[
"MIT"
] | null | null | null |
import json
import requests
from os import path
| 38.111111
| 117
| 0.618724
|
9e1202ada111dfedf3e1239998ddc9e7e0c2bac2
| 2,568
|
py
|
Python
|
linked_list.py
|
bentsi/data-structures
|
ce4a3a49ec131550ec0b77875b8f0367addcca05
|
[
"Apache-2.0"
] | null | null | null |
linked_list.py
|
bentsi/data-structures
|
ce4a3a49ec131550ec0b77875b8f0367addcca05
|
[
"Apache-2.0"
] | null | null | null |
linked_list.py
|
bentsi/data-structures
|
ce4a3a49ec131550ec0b77875b8f0367addcca05
|
[
"Apache-2.0"
] | 1
|
2021-01-10T15:41:50.000Z
|
2021-01-10T15:41:50.000Z
|
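# Editorial sketch (the LinkedList class was truncated out of this record):
# a minimal singly linked list covering exactly the API the driver below
# exercises -- append/print/length/get/erase/set, indexing and str().
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList:
    def __init__(self):
        self.head = None
    def append(self, data):
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        cur = self.head
        while cur.next:
            cur = cur.next
        cur.next = node
    def length(self):
        count, cur = 0, self.head
        while cur:
            count += 1
            cur = cur.next
        return count
    def get(self, index):
        cur = self.head
        for _ in range(index):
            cur = cur.next
        return cur.data
    __getitem__ = get  # lets the driver use ll[idx]
    def set(self, index, new_data):
        cur = self.head
        for _ in range(index):
            cur = cur.next
        cur.data = new_data
    def erase(self, index):
        if index == 0:
            data, self.head = self.head.data, self.head.next
            return data
        prev = self.head
        for _ in range(index - 1):
            prev = prev.next
        data = prev.next.data
        prev.next = prev.next.next
        return data
    def print(self):
        print(self)
    def __str__(self):
        items, cur = [], self.head
        while cur:
            items.append(str(cur.data))
            cur = cur.next
        return ' -> '.join(items)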
if __name__ == '__main__':
ll = LinkedList()
ll.append(data="Fedor")
ll.append(data="Julia")
ll.append(data="Bentsi")
ll.print()
print("Length of the Linked list is: ", ll.length())
idx = 1
print(ll.get(index=idx))
print(f"Data at index {idx} is {ll[idx]}")
print("Deleted: ", ll.erase(index=0))
ll.append(data="Fedor")
ll.append(data="Bentsi")
ll.set(index=3, new_data="Tim Peters")
print(ll)
| 24.457143
| 73
| 0.550234
|
9e13f559b1f0c5960c9398a871cf6613d7ce918c
| 5,442
|
py
|
Python
|
apps/login/views.py
|
kwarodom/bemoss_web_ui-1
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
[
"Unlicense"
] | null | null | null |
apps/login/views.py
|
kwarodom/bemoss_web_ui-1
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
[
"Unlicense"
] | null | null | null |
apps/login/views.py
|
kwarodom/bemoss_web_ui-1
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2016, Virginia Tech
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and should not be
interpreted as representing official policies, either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the
United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,
nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,
express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe
privately owned rights.
Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or
otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States
Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
VIRGINIA TECH ADVANCED RESEARCH INSTITUTE
under Contract DE-EE0006352
#__author__ = "BEMOSS Team"
#__credits__ = ""
#__version__ = "2.0"
#__maintainer__ = "BEMOSS Team"
#__email__ = "aribemoss@gmail.com"
#__website__ = "www.bemoss.org"
#__created__ = "2014-09-12 12:04:50"
#__lastUpdated__ = "2016-03-14 11:23:33"
'''
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib import messages
import logging
logger = logging.getLogger("views")
| 52.326923
| 118
| 0.717567
|
9e14d6737904e50f196708249c8435de6151b062
| 2,768
|
py
|
Python
|
custom_html_validator/custom_html_validator.py
|
koan-u/custom_html_validator
|
1a6735146e64d3c346201d10eddfd9ebfe1377c2
|
[
"MIT"
] | null | null | null |
custom_html_validator/custom_html_validator.py
|
koan-u/custom_html_validator
|
1a6735146e64d3c346201d10eddfd9ebfe1377c2
|
[
"MIT"
] | null | null | null |
custom_html_validator/custom_html_validator.py
|
koan-u/custom_html_validator
|
1a6735146e64d3c346201d10eddfd9ebfe1377c2
|
[
"MIT"
] | null | null | null |
from html.parser import HTMLParser
| 33.756098
| 66
| 0.462789
|
9e14d840b0d68fa20db94e8f512ad11ba709e64f
| 1,841
|
py
|
Python
|
boltfile.py
|
arahmanhamdy/bolt
|
8f5d9b8149db833b54a7b353162b2c28a53c8aff
|
[
"MIT"
] | 15
|
2016-10-21T14:30:38.000Z
|
2021-10-12T04:50:48.000Z
|
boltfile.py
|
arahmanhamdy/bolt
|
8f5d9b8149db833b54a7b353162b2c28a53c8aff
|
[
"MIT"
] | 51
|
2016-02-05T01:24:32.000Z
|
2019-12-09T16:52:20.000Z
|
boltfile.py
|
arahmanhamdy/bolt
|
8f5d9b8149db833b54a7b353162b2c28a53c8aff
|
[
"MIT"
] | 6
|
2016-10-17T13:48:16.000Z
|
2021-03-28T20:40:14.000Z
|
import logging
import os.path
import bolt
import bolt.about
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
_src_dir = os.path.join(PROJECT_ROOT, 'bolt')
_test_dir = os.path.join(PROJECT_ROOT, 'test')
_output_dir = os.path.join(PROJECT_ROOT, 'output')
_coverage_dir = os.path.join(_output_dir, 'coverage')
config = {
'pip': {
'command': 'install',
'options': {
'r': './requirements.txt'
}
},
'delete-pyc': {
'sourcedir': _src_dir,
'recursive': True,
'test-pyc': {
'sourcedir': _test_dir,
}
},
'conttest' : {
'task': 'ut'
},
'mkdir': {
'directory': _output_dir,
},
'nose': {
'directory': _test_dir,
'ci': {
'options': {
'with-xunit': True,
'xunit-file': os.path.join(_output_dir, 'unit_tests_log.xml'),
'with-coverage': True,
'cover-erase': True,
'cover-package': 'bolt',
'cover-html': True,
'cover-html-dir': _coverage_dir,
'cover-branches': True,
}
}
},
'setup': {
'command': 'bdist_wheel',
'egg-info': {
'command': 'egg_info'
}
},
'coverage': {
'task': 'nose',
'include': ['bolt'],
'output': os.path.join(_output_dir, 'ut_coverage')
}
}
# Development tasks
bolt.register_task('clear-pyc', ['delete-pyc', 'delete-pyc.test-pyc'])
bolt.register_task('ut', ['clear-pyc', 'nose'])
bolt.register_task('ct', ['conttest'])
bolt.register_task('pack', ['setup', 'setup.egg-info'])
# CI/CD tasks
bolt.register_task('run-unit-tests', ['clear-pyc', 'mkdir', 'nose.ci'])
# Default task (not final).
bolt.register_task('default', ['pip', 'ut'])
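# Illustrative invocations (the bolt CLI is assumed to be on PATH):
#   $ bolt ut              # clear *.pyc under bolt/ and test/, run the nose suite
#   $ bolt ct              # re-run 'ut' continuously on file changes (conttest)
#   $ bolt run-unit-tests  # CI variant: adds xunit log + coverage output
#   $ bolt                 # 'default' task: pip install -r requirements.txt, then ut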
| 25.569444
| 78
| 0.523628
|
9e15279f716f2f3dcaf04f7c355aee058fe22e12
| 1,120
|
py
|
Python
|
pyxedit/xedit/object_classes/HDPT.py
|
leontristain/pyxedit
|
4100506930ab6d62d6e2c4a89fee024dbbf87c7b
|
[
"MIT"
] | null | null | null |
pyxedit/xedit/object_classes/HDPT.py
|
leontristain/pyxedit
|
4100506930ab6d62d6e2c4a89fee024dbbf87c7b
|
[
"MIT"
] | 13
|
2019-04-09T06:14:22.000Z
|
2020-07-03T07:35:30.000Z
|
pyxedit/xedit/object_classes/HDPT.py
|
leontristain/pyxedit
|
4100506930ab6d62d6e2c4a89fee024dbbf87c7b
|
[
"MIT"
] | null | null | null |
from enum import Enum
from pyxedit.xedit.attribute import XEditAttribute
from pyxedit.xedit.generic import XEditGenericObject
| 31.111111
| 80
| 0.669643
|
9e158c914469c96413a23f9b7926f662ec188191
| 1,309
|
py
|
Python
|
assignments/04_head/head.py
|
emma-huffman/biosystems-analytics-2020
|
eaf9c084407fa6d25b815b7d63077ed9aec53447
|
[
"MIT"
] | null | null | null |
assignments/04_head/head.py
|
emma-huffman/biosystems-analytics-2020
|
eaf9c084407fa6d25b815b7d63077ed9aec53447
|
[
"MIT"
] | null | null | null |
assignments/04_head/head.py
|
emma-huffman/biosystems-analytics-2020
|
eaf9c084407fa6d25b815b7d63077ed9aec53447
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : Me <me@foo.com>
Date : today
Purpose: Rock the Casbah
"""
import argparse
import io
import os
import sys
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n',
'--num',
help='Number of lines',
metavar='int',
type=int,
default=10)
parser.add_argument('file',
help='Input File',
type=argparse.FileType('r'))
args = parser.parse_args()
if not args.num > 0:
parser.error(f'--num "{args.num}" must be greater than 0')
return args
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
    fh = args.file
    print(fh.name)
    num_line = 0
    for line in fh:
        num_line += 1
        print(line, end='')
        if num_line == args.num:
            break
# --------------------------------------------------
if __name__ == '__main__':
main()
| 23.375
| 66
| 0.450726
|
9e16d5b26d929cfae2b8a212b8915e0500128d25
| 1,040
|
py
|
Python
|
tests/CornerCasesTest.py
|
dpep/py_pluckit
|
be7c1cd6e2555234f08dd0cb6239db2c249562a4
|
[
"MIT"
] | null | null | null |
tests/CornerCasesTest.py
|
dpep/py_pluckit
|
be7c1cd6e2555234f08dd0cb6239db2c249562a4
|
[
"MIT"
] | null | null | null |
tests/CornerCasesTest.py
|
dpep/py_pluckit
|
be7c1cd6e2555234f08dd0cb6239db2c249562a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import unittest
from collections import namedtuple
sys.path = [ os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')) ] + sys.path
from pluckit import pluck
if __name__ == '__main__':
unittest.main()
| 21.22449
| 91
| 0.516346
|
9e177263a0ebd8c89321b840a71c84edfd2ea746
| 1,520
|
py
|
Python
|
src/lib/transforms/cross_drop.py
|
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
|
0bc51075db31a747eeebb7f4775a3cd26ad5f870
|
[
"MIT"
] | 44
|
2020-12-09T06:15:15.000Z
|
2022-03-31T02:37:47.000Z
|
src/lib/transforms/cross_drop.py
|
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
|
0bc51075db31a747eeebb7f4775a3cd26ad5f870
|
[
"MIT"
] | null | null | null |
src/lib/transforms/cross_drop.py
|
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
|
0bc51075db31a747eeebb7f4775a3cd26ad5f870
|
[
"MIT"
] | 7
|
2020-12-09T10:08:32.000Z
|
2021-08-17T01:53:51.000Z
|
from albumentations.core.transforms_interface import ImageOnlyTransform
import albumentations.augmentations.functional as F
import random
| 33.777778
| 94
| 0.620395
|
9e1798f13a1e5958c9273e51efaff12141f4e76c
| 9,497
|
py
|
Python
|
js_components/cms_plugins.py
|
compoundpartners/js-components
|
a58a944254354078a0a7b53a4c9a7df50790267a
|
[
"BSD-3-Clause"
] | null | null | null |
js_components/cms_plugins.py
|
compoundpartners/js-components
|
a58a944254354078a0a7b53a4c9a7df50790267a
|
[
"BSD-3-Clause"
] | null | null | null |
js_components/cms_plugins.py
|
compoundpartners/js-components
|
a58a944254354078a0a7b53a4c9a7df50790267a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import six
from django.utils.translation import ugettext_lazy as _
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
from cms.plugin_base import CMSPluginBase, CMSPluginBaseMetaclass
from cms.plugin_pool import plugin_pool
from . import models, forms
from .utils.urlmatch import urlmatch
from .constants import (
HIDE_PROMO,
HIDE_PROMO_ROLLOVER,
HIDE_PROMO_VIDEO,
HIDE_TWITTER,
HIDE_COUNTERS,
HIDE_RAWHTML,
HIDE_GATED_CONTENT,
HIDE_FLOAT,
HIDE_LIGHTBOX,
CUSTOM_PLUGINS,
PROMO_CHILD_CLASSES,
)
if not HIDE_PROMO:
plugin_pool.register_plugin(PromoUnitPlugin)
if not HIDE_TWITTER:
plugin_pool.register_plugin(TwitterFeedPlugin)
if not HIDE_COUNTERS:
plugin_pool.register_plugin(CountersContainerPlugin)
plugin_pool.register_plugin(CounterPlugin)
#if 'Bootstrap4GridRowPlugin' in plugin_pool.plugins:
#plugin_pool.plugins['Bootstrap4GridRowPlugin'].child_classes.append('CountersContainerPlugin')
if not HIDE_RAWHTML:
plugin_pool.register_plugin(RawHTMLPlugin)
plugin_pool.register_plugin(RawHTMLWithIDPlugin)
for name, parameters in CUSTOM_PLUGINS.items():
p = type(
str(name.replace(' ', '') + 'Plugin'),
(CustomPlugin,),
{'name': name},
)
plugin_pool.register_plugin(p)
if not HIDE_GATED_CONTENT:
plugin_pool.register_plugin(GatedContentPlugin)
class FloatPlugin(LayoutMixin, CMSPluginBase):
module = 'JumpSuite Componens'
name = _('Float Container')
model = models.Float
form = forms.FloatForm
render_template = 'js_components/float.html'
TEMPLATE_NAME = 'js_components/float_%s.html'
#change_form_template = 'admin/js_components/float.html'
allow_children = True
if not HIDE_FLOAT:
plugin_pool.register_plugin(FloatPlugin)
class LightboxPlugin(LayoutMixin, CMSPluginBase):
module = 'JumpSuite Componens'
TEMPLATE_NAME = 'js_components/lightbox_%s.html'
name = _('Lightbox')
model = models.Lightbox
form = forms.LightboxForm
render_template = 'js_components/lightbox.html'
allow_children = True
child_classes = ['Bootstrap4PicturePlugin']
if not HIDE_LIGHTBOX:
plugin_pool.register_plugin(LightboxPlugin)
| 29.958991
| 134
| 0.666737
|
9e18ddf285ec21f8d58dafd4142a06363020741a
| 1,232
|
py
|
Python
|
src/julia/tests/test_juliaoptions.py
|
dpinol/pyjulia
|
cec4bf0b0eac7e39cecd8f3e7882563062903d0f
|
[
"MIT"
] | 649
|
2016-09-09T07:38:19.000Z
|
2022-03-28T04:30:55.000Z
|
src/julia/tests/test_juliaoptions.py
|
dpinol/pyjulia
|
cec4bf0b0eac7e39cecd8f3e7882563062903d0f
|
[
"MIT"
] | 362
|
2016-09-08T16:25:30.000Z
|
2022-03-05T23:15:05.000Z
|
src/julia/tests/test_juliaoptions.py
|
dpinol/pyjulia
|
cec4bf0b0eac7e39cecd8f3e7882563062903d0f
|
[
"MIT"
] | 85
|
2016-11-08T09:32:44.000Z
|
2022-03-03T13:10:37.000Z
|
import pytest
from julia.core import JuliaOptions
# fmt: off
# fmt: off
| 28
| 65
| 0.655844
|
9e19326d841517afadd3d42542cc9b11a5c4a5d7
| 1,578
|
py
|
Python
|
tests/common.py
|
Algomorph/ext_argparse
|
fbca26f8a551f84677475a11fb5415ddda78abd9
|
[
"Apache-2.0"
] | 1
|
2021-09-06T23:22:07.000Z
|
2021-09-06T23:22:07.000Z
|
tests/common.py
|
Algomorph/ext_argparse
|
fbca26f8a551f84677475a11fb5415ddda78abd9
|
[
"Apache-2.0"
] | 11
|
2021-09-07T14:13:39.000Z
|
2021-09-29T15:17:46.000Z
|
tests/common.py
|
Algomorph/ext_argparse
|
fbca26f8a551f84677475a11fb5415ddda78abd9
|
[
"Apache-2.0"
] | null | null | null |
import os
import pathlib
import typing
import pytest
from ext_argparse.parameter import Parameter
from ext_argparse.param_enum import ParameterEnum
from enum import Enum
| 28.690909
| 116
| 0.693916
|
9e1b5f4b3183d1482047160b015715a1f35d97f0
| 389
|
py
|
Python
|
lambda/exercices/PhotoCollector/photo_uploader_from_csv.py
|
Mythridor/aws-scripting
|
5f978ae7f2b05a40862cbe35d766534fcc40fef0
|
[
"MIT"
] | null | null | null |
lambda/exercices/PhotoCollector/photo_uploader_from_csv.py
|
Mythridor/aws-scripting
|
5f978ae7f2b05a40862cbe35d766534fcc40fef0
|
[
"MIT"
] | null | null | null |
lambda/exercices/PhotoCollector/photo_uploader_from_csv.py
|
Mythridor/aws-scripting
|
5f978ae7f2b05a40862cbe35d766534fcc40fef0
|
[
"MIT"
] | null | null | null |
#! /usr/local/bin/Python3.5
import urllib.request
with open("images.csv", 'r') as csv:
i = 0
for line in csv:
line = line.split(',')
if line[1] != '' and line[1] != "\n":
urllib.request.urlretrieve(line[1].encode('utf-8'), ("img_" + str(i) + ".jpg").encode('utf-8'))
print("Image saved".encode('utf-8'))
i += 1
print("No result")
| 25.933333
| 107
| 0.524422
|
9e1b7c970fedf5252f5d2635e9703e31344e54e5
| 1,031
|
py
|
Python
|
src/main/tools/api.py
|
NGnius/streamq
|
aa31085befc7da2e3f7461698b2638a246a73eef
|
[
"MIT"
] | null | null | null |
src/main/tools/api.py
|
NGnius/streamq
|
aa31085befc7da2e3f7461698b2638a246a73eef
|
[
"MIT"
] | null | null | null |
src/main/tools/api.py
|
NGnius/streamq
|
aa31085befc7da2e3f7461698b2638a246a73eef
|
[
"MIT"
] | null | null | null |
'''
API-related functions in one spot for convenience
Created by NGnius 2019-06-15
'''
from flask import jsonify, request
from threading import Semaphore, RLock
single_semaphores = dict()
resource_lock = RLock()
| 26.435897
| 69
| 0.693501
|
9e1ba32a4a9094fb6f7a46a889dbdd7f780c6321
| 323
|
py
|
Python
|
run.py
|
kamidox/weixin_producthunt
|
24269da93e75374ee481b1b78257b18abda4d0c7
|
[
"BSD-3-Clause"
] | 10
|
2015-01-07T06:01:13.000Z
|
2021-02-14T09:11:10.000Z
|
run.py
|
kamidox/weixin_producthunt
|
24269da93e75374ee481b1b78257b18abda4d0c7
|
[
"BSD-3-Clause"
] | 3
|
2015-01-01T09:56:04.000Z
|
2015-01-06T01:34:44.000Z
|
run.py
|
kamidox/weixin_producthunt
|
24269da93e75374ee481b1b78257b18abda4d0c7
|
[
"BSD-3-Clause"
] | 5
|
2015-01-01T10:31:50.000Z
|
2018-03-09T05:22:16.000Z
|
"""
productporter
~~~~~~~~~~~~~~~~~~~~
helper for uwsgi
:copyright: (c) 2014 by the ProductPorter Team.
:license: BSD, see LICENSE for more details.
"""
from productporter.app import create_app
from productporter.configs.production import ProductionConfig
app = create_app(config=ProductionConfig())
| 23.071429
| 61
| 0.693498
|
9e1bee51dd0ea1878f4a4736c40b34f0977aa174
| 3,968
|
py
|
Python
|
built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.models as models
from torch.autograd import Variable
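# Editorial sketch (the MobileNet and speed definitions were truncated out of
# this record). MobileNet below is the standard v1 stack of depthwise-separable
# blocks; speed() is assumed to time forward passes over a dummy batch, matching
# the calls in __main__. Both are illustrative, not the original code.
class MobileNet(nn.Module):
    def __init__(self):
        super(MobileNet, self).__init__()
        def conv_bn(inp, oup, stride):  # full 3x3 conv + BN + ReLU
            return nn.Sequential(
                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True))
        def conv_dw(inp, oup, stride):  # depthwise 3x3 + pointwise 1x1
            return nn.Sequential(
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True))
        self.model = nn.Sequential(
            conv_bn(3, 32, 2), conv_dw(32, 64, 1), conv_dw(64, 128, 2),
            conv_dw(128, 128, 1), conv_dw(128, 256, 2), conv_dw(256, 256, 1),
            conv_dw(256, 512, 2), conv_dw(512, 512, 1), conv_dw(512, 512, 1),
            conv_dw(512, 512, 1), conv_dw(512, 512, 1), conv_dw(512, 512, 1),
            conv_dw(512, 1024, 2), conv_dw(1024, 1024, 1), nn.AvgPool2d(7))
        self.fc = nn.Linear(1024, 1000)
    def forward(self, x):
        x = self.model(x)
        x = x.view(-1, 1024)
        return self.fc(x)
def speed(model, name):
    model.eval()
    x = torch.randn(1, 3, 224, 224).npu()
    with torch.no_grad():
        start = time.time()
        for _ in range(10):
            model(x)
    print('%s: %.4f s per forward pass' % (name, (time.time() - start) / 10))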
if __name__ == '__main__':
#cudnn.benchmark = True # This will make network slow ??
resnet18 = models.resnet18().npu()
alexnet = models.alexnet().npu()
vgg16 = models.vgg16().npu()
squeezenet = models.squeezenet1_0().npu()
mobilenet = MobileNet().npu()
speed(resnet18, 'resnet18')
speed(alexnet, 'alexnet')
speed(vgg16, 'vgg16')
speed(squeezenet, 'squeezenet')
speed(mobilenet, 'mobilenet')
| 35.115044
| 80
| 0.618952
|
9e1da62e19fe4f3008c5d21f24d0decbe6f6039d
| 1,012
|
py
|
Python
|
client/setup.py
|
nnabeyang/tepra-lite-esp32
|
69cbbafce6a3f8b0214178cc80d2fea024ab8c07
|
[
"MIT"
] | 33
|
2021-09-04T08:46:48.000Z
|
2022-02-04T08:12:55.000Z
|
client/setup.py
|
nnabeyang/tepra-lite-esp32
|
69cbbafce6a3f8b0214178cc80d2fea024ab8c07
|
[
"MIT"
] | 2
|
2021-09-28T12:05:21.000Z
|
2021-12-11T04:08:04.000Z
|
client/setup.py
|
nnabeyang/tepra-lite-esp32
|
69cbbafce6a3f8b0214178cc80d2fea024ab8c07
|
[
"MIT"
] | 2
|
2021-09-28T10:51:27.000Z
|
2021-12-10T09:56:22.000Z
|
from setuptools import setup, find_packages
__version__ = '1.0.0'
__author__ = 'Takumi Sueda'
__author_email__ = 'puhitaku@gmail.com'
__license__ = 'MIT License'
__classifiers__ = (
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
)
with open('README.md', 'r') as f:
readme = f.read()
setup(
name='tepracli',
version=__version__,
license=__license__,
author=__author__,
author_email=__author_email__,
url='https://github.com/puhitaku/tepra-lite-esp32/tree/master/client',
description='An example of tepra-lite-esp32 client / CLI',
long_description=readme,
long_description_content_type='text/markdown',
classifiers=__classifiers__,
packages=find_packages(),
package_data={'': ['assets/ss3.ttf']},
include_package_data=True,
install_requires=['click', 'pillow', 'qrcode[pil]', 'requests'],
)
| 29.764706
| 74
| 0.6917
|
9e1db727da394433477d2ebb717048f5a98a0ab1
| 962
|
py
|
Python
|
03.Trabalhando_com_Arquivos/003.path_arquivos.py
|
heliton1986/Descubra_Python
|
66738b295b0c5f526529ce0588fa3189eff110a1
|
[
"MIT"
] | null | null | null |
03.Trabalhando_com_Arquivos/003.path_arquivos.py
|
heliton1986/Descubra_Python
|
66738b295b0c5f526529ce0588fa3189eff110a1
|
[
"MIT"
] | null | null | null |
03.Trabalhando_com_Arquivos/003.path_arquivos.py
|
heliton1986/Descubra_Python
|
66738b295b0c5f526529ce0588fa3189eff110a1
|
[
"MIT"
] | null | null | null |
# How to work with paths
from os import path
import time
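# Editorial sketch (the original function body was truncated out of this
# record): report basic path metadata, which the os.path and time imports
# suggest. The default argument is illustrative.
def dadosArquivo(arquivo=__file__):
    print('Exists:', path.exists(arquivo))
    print('Size (bytes):', path.getsize(arquivo))
    print('Last modified:', time.ctime(path.getmtime(arquivo)))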
dadosArquivo()
| 31.032258
| 96
| 0.745322
|
9e210cf9cae77591487ca0d70ca7341aca8bd44a
| 16,303
|
py
|
Python
|
src/colorpredicate.py
|
petrusmassabki/color-predicate
|
828f62b50985cb795aa5b5743e4f7e5c305d2175
|
[
"MIT"
] | null | null | null |
src/colorpredicate.py
|
petrusmassabki/color-predicate
|
828f62b50985cb795aa5b5743e4f7e5c305d2175
|
[
"MIT"
] | null | null | null |
src/colorpredicate.py
|
petrusmassabki/color-predicate
|
828f62b50985cb795aa5b5743e4f7e5c305d2175
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import colorsys
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
| 37.825986
| 90
| 0.56235
|
9e2177e43f0a318b03307485c7498c4b6cef36fa
| 2,127
|
py
|
Python
|
src/environments/migrations/0001_initial.py
|
nixplay/bullet-train-api
|
608422d174443a4d9178d875ccaeb756a771e908
|
[
"BSD-3-Clause"
] | 1,259
|
2021-06-10T11:24:09.000Z
|
2022-03-31T10:30:44.000Z
|
src/environments/migrations/0001_initial.py
|
nixplay/bullet-train-api
|
608422d174443a4d9178d875ccaeb756a771e908
|
[
"BSD-3-Clause"
] | 392
|
2021-06-10T11:12:29.000Z
|
2022-03-31T10:13:53.000Z
|
src/environments/migrations/0001_initial.py
|
nixplay/bullet-train-api
|
608422d174443a4d9178d875ccaeb756a771e908
|
[
"BSD-3-Clause"
] | 58
|
2021-06-11T03:18:07.000Z
|
2022-03-31T14:39:10.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 15:41
from __future__ import unicode_literals
import app.utils
from django.db import migrations, models
import django.db.models.deletion
| 39.388889
| 99
| 0.487541
|
9e23d085a14f192cef141c0732be27df361cf10b
| 4,456
|
py
|
Python
|
tests/test_basic_train.py
|
maxwellmckinnon/fastai
|
b67bf7184ac2be1825697709051c5bcba058a40d
|
[
"Apache-2.0"
] | 1
|
2019-04-08T09:52:28.000Z
|
2019-04-08T09:52:28.000Z
|
tests/test_basic_train.py
|
maxwellmckinnon/fastai
|
b67bf7184ac2be1825697709051c5bcba058a40d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_basic_train.py
|
maxwellmckinnon/fastai
|
b67bf7184ac2be1825697709051c5bcba058a40d
|
[
"Apache-2.0"
] | 1
|
2020-05-19T12:56:20.000Z
|
2020-05-19T12:56:20.000Z
|
"""
module: basic_train.py - Model fitting methods
docs : https://docs.fast.ai/train.html
"""
import pytest, fastai
from fastai.vision import *
from utils.fakes import *
from utils.text import *
from utils.mem import *
from fastai.utils.mem import *
from math import isclose
torch_preload_mem()
# this is not a fixture on purpose - the memory measurement tests are very sensitive, so
# they need to be able to get a fresh learn object and not one modified by other tests.
def learn_large_unfit(data):
learn = create_cnn(data, models.resnet18, metrics=accuracy)
return learn
#check_mem_expected = report_mem_real
#@pytest.mark.skip(reason="WIP")
| 38.08547
| 134
| 0.710727
|
9e246f8197a11b73a278225e329244b30642e5a1
| 196
|
py
|
Python
|
ch04/return.none.py
|
kxen42/Learn-Python-Programming-Third-Edition
|
851ddc5e6094fadd44f31a9ad1d3876456b04372
|
[
"MIT"
] | 19
|
2021-11-05T22:54:09.000Z
|
2022-03-29T15:03:47.000Z
|
ch04/return.none.py
|
kxen42/Learn-Python-Programming-Third-Edition
|
851ddc5e6094fadd44f31a9ad1d3876456b04372
|
[
"MIT"
] | null | null | null |
ch04/return.none.py
|
kxen42/Learn-Python-Programming-Third-Edition
|
851ddc5e6094fadd44f31a9ad1d3876456b04372
|
[
"MIT"
] | 26
|
2021-11-12T17:04:50.000Z
|
2022-03-29T01:10:35.000Z
|
# return.none.py
# (the one-line definition below was truncated out of this record; restored,
# since the example's comments clearly assume a function returning None)
def func():
    pass  # no explicit return, so calls evaluate to None
func() # the return of this call won't be collected. It's lost.
a = func() # the return of this one instead is collected into `a`
print(a) # prints: None
| 24.5
| 66
| 0.663265
|
9e24c2357a655395c364e4237fd2c11303d74334
| 204
|
py
|
Python
|
pyperi/__init__.py
|
takeontom/PyPeri
|
181b6c60bf5ec5c57cd24418ee4524ed81c9a998
|
[
"MIT"
] | 5
|
2017-03-10T10:43:07.000Z
|
2021-04-01T06:28:29.000Z
|
pyperi/__init__.py
|
takeontom/PyPeri
|
181b6c60bf5ec5c57cd24418ee4524ed81c9a998
|
[
"MIT"
] | 359
|
2016-12-12T20:19:16.000Z
|
2022-03-28T09:04:19.000Z
|
pyperi/__init__.py
|
takeontom/PyPeri
|
181b6c60bf5ec5c57cd24418ee4524ed81c9a998
|
[
"MIT"
] | 3
|
2018-08-12T13:38:30.000Z
|
2020-07-10T14:36:31.000Z
|
# -*- coding: utf-8 -*-
__author__ = """Tom Smith"""
__email__ = 'tom@takeontom.com'
__version__ = '0.2.0'
from pyperi.pyperi import Peri # noqa
from pyperi.pyperi import PyPeriConnectionError # noqa
| 22.666667
| 55
| 0.70098
|
f508079561b7a2a57df3ea9bb24da6c3cf24ed29
| 13,454
|
py
|
Python
|
examples/cartpole_example/test/cartpole_PID_MPC_sim.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | 17
|
2019-11-15T06:27:05.000Z
|
2021-10-02T14:24:25.000Z
|
examples/cartpole_example/test/cartpole_PID_MPC_sim.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | null | null | null |
examples/cartpole_example/test/cartpole_PID_MPC_sim.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | 4
|
2020-09-03T17:01:34.000Z
|
2021-11-05T04:09:24.000Z
|
import numpy as np
import scipy.sparse as sparse
from scipy.integrate import ode
from scipy.interpolate import interp1d
import time
import control
import control.matlab
import numpy.random
import pandas as pd
from ltisim import LinearStateSpaceSystem
from pendulum_model import *
from pyMPC.mpc import MPCController
# Reference model default parameters
k_def = 5.0
tau_def = 120e-3
Acl_c_def = np.array([[0,1,0], [0, 0, k_def], [0, 0, -1/tau_def]])
Bcl_c_def = np.array([[0],
[k_def],
[1/tau_def]
])
# PID default parameters
Ts_PID = 1e-3
# Reference trajectory
t_ref_vec = np.array([0.0, 5.0, 10.0, 20.0, 25.0, 30.0, 40.0, 100.0])
p_ref_vec = np.array([0.0, 0.0, 0.8, 0.8, 0.0, 0.0, 0.8, 0.8])
rp_fun = interp1d(t_ref_vec, p_ref_vec, kind='linear')
# MPC parameters
Ts_MPC_def = 10e-3
Qx_def = 1.0 * sparse.diags([1.0, 0, 10.0]) # Quadratic cost for states x0, x1, ..., x_N-1
QxN_def = Qx_def
Qr_def = 0.0 * sparse.eye(1) # Quadratic cost for u0, u1, ...., u_N-1
QDr_def = 1e-1 / (Ts_MPC_def ** 2) * sparse.eye(1) # Quadratic cost for Du0, Du1, ...., Du_N-1
# Defaults
DEFAULTS_PENDULUM_MPC = {
'xref_cl_fun': xref_cl_fun_def,
'uref': np.array([0.0]), # N
'std_npos': 0*0.001, # m
'std_nphi': 0*0.00005, # rad
'std_dF': 0.05, # N
'w_F':20, # rad
'len_sim': 40, #s
'Acl_c': Acl_c_def,
'Bcl_c': Bcl_c_def,
'Ts_MPC': Ts_MPC_def,
'Np': 100,
'Nc': 50,
'Qx': Qx_def,
'QxN': QxN_def,
'Qr': Qr_def,
'QDr': QDr_def,
'Q_kal': np.diag([0.1, 10, 0.1, 10]),
'R_kal': 1*np.eye(2),
'QP_eps_abs': 1e-3,
'QP_eps_rel': 1e-3,
'seed_val': None
}
def get_default_parameters(sim_options):
""" Which parameters are left to default ??"""
default_keys = [key for key in DEFAULTS_PENDULUM_MPC if key not in sim_options]
return default_keys
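# Example: with an empty options dict nothing is overridden, so
#   get_default_parameters({}) == list(DEFAULTS_PENDULUM_MPC.keys())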
if __name__ == '__main__':
import matplotlib.pyplot as plt
import matplotlib
plt.close('all')
simopt = DEFAULTS_PENDULUM_MPC
time_sim_start = time.perf_counter()
simout = simulate_pendulum_MPC(simopt)
time_sim = time.perf_counter() - time_sim_start
t = simout['t']
x = simout['x']
u = simout['u']
y = simout['y']
y_meas = simout['y_meas']
x_ref = simout['x_ref']
x_fast = simout['x_fast']
y_meas_fast = simout['y_meas_fast']
u_fast = simout['u_fast']
x_model = simout['x_model']
t_fast = simout['t_fast']
x_ref_fast = simout['x_ref_fast']
F_input = simout['Fd_fast']
status = simout['status']
ref_phi_fast = simout['ref_phi_fast']
uref = get_parameter(simopt, 'uref')
nsim = len(t)
nx = x.shape[1]
ny = y.shape[1]
y_ref = x_ref[:, [0, 2]]
fig,axes = plt.subplots(4,1, figsize=(10,10), sharex=True)
axes[0].plot(t, y_meas[:, 0], "b", label='p_meas')
axes[0].plot(t_fast, x_fast[:, 0], "k", label='p')
axes[0].plot(t, x_model[:, 0], "r", label='p model')
axes[0].plot(t, x_ref[:, 0], "k--", label='p reference')
axes[0].set_ylim(-2.0,2.0)
axes[0].set_title("Position (m)")
axes[1].plot(t_fast, x_fast[:, 1], "k", label='v')
axes[1].plot(t, x_model[:, 1], "r", label='v model')
axes[1].set_ylim(-3,3.0)
axes[1].set_title("Speed (m/s)")
axes[2].plot(t, y_meas[:, 1]*RAD_TO_DEG, "b", label='phi_meas')
axes[2].plot(t_fast, x_fast[:, 2]*RAD_TO_DEG, 'k', label="phi")
axes[2].plot(t, x_model[:, 2]*RAD_TO_DEG, "r", label='phi model')
axes[2].plot(t_fast, ref_phi_fast[:,0]*RAD_TO_DEG, "k--", label="phi_ref")
axes[2].set_ylim(-20,20)
axes[2].set_title("Angle (deg)")
axes[3].plot(t, u[:,0], label="F")
axes[3].plot(t_fast, F_input, "k", label="Fd")
axes[3].plot(t, uref*np.ones(np.shape(t)), "r--", label="F_ref")
axes[3].set_ylim(-20,20)
axes[3].set_title("Force (N)")
for ax in axes:
ax.grid(True)
ax.legend()
X = np.hstack((t_fast, x_fast, u_fast, y_meas_fast, F_input))
COL_T = ['time']
COL_X = ['p', 'v', 'theta', 'omega']
COL_U = ['u']
COL_D = ['d']
COL_Y = ['p_meas', 'theta_meas']
COL = COL_T + COL_X + COL_U + COL_Y + COL_D
df_X = pd.DataFrame(X, columns=COL)
df_X.to_csv("pendulum_data_PID.csv", index=False)
| 33.219753
| 179
| 0.612829
|
f508880d35de4e5c61d56d0000d4001cccea0293
| 163
|
py
|
Python
|
server/controllers/main.py
|
eruixma/trading-app
|
3db3bbd27fdc405b9ecb41d9e68330899a3dcc6a
|
[
"BSD-2-Clause"
] | 2
|
2019-03-27T04:46:29.000Z
|
2019-10-10T13:03:24.000Z
|
server/controllers/main.py
|
eruixma/trading-app
|
3db3bbd27fdc405b9ecb41d9e68330899a3dcc6a
|
[
"BSD-2-Clause"
] | 1
|
2021-06-01T23:31:21.000Z
|
2021-06-01T23:31:21.000Z
|
server/controllers/main.py
|
eruixma/trading-app
|
3db3bbd27fdc405b9ecb41d9e68330899a3dcc6a
|
[
"BSD-2-Clause"
] | 1
|
2021-08-19T03:09:05.000Z
|
2021-08-19T03:09:05.000Z
|
from flask import Blueprint, current_app
main = Blueprint('main', __name__)
| 16.3
| 53
| 0.723926
|
f508dd3ad06395335728ce6e7db17f8e899fd2f6
| 2,221
|
py
|
Python
|
pypad/collab.py
|
candyninja001/pypad
|
82bfc104c2524ca54cc415d37d2c21fec471838f
|
[
"MIT"
] | null | null | null |
pypad/collab.py
|
candyninja001/pypad
|
82bfc104c2524ca54cc415d37d2c21fec471838f
|
[
"MIT"
] | null | null | null |
pypad/collab.py
|
candyninja001/pypad
|
82bfc104c2524ca54cc415d37d2c21fec471838f
|
[
"MIT"
] | null | null | null |
from enum import Enum
from .dev import Dev
| 24.677778
| 61
| 0.662765
|
f50910b14f5b09655a9e1eaecc696a5cfe950b0f
| 4,923
|
py
|
Python
|
settings.py
|
msetzu/data-mining
|
9e01d00964004dea4a2aea88dfe855f785302ef1
|
[
"MIT"
] | 1
|
2018-10-09T14:41:59.000Z
|
2018-10-09T14:41:59.000Z
|
settings.py
|
msetzu/data-mining
|
9e01d00964004dea4a2aea88dfe855f785302ef1
|
[
"MIT"
] | null | null | null |
settings.py
|
msetzu/data-mining
|
9e01d00964004dea4a2aea88dfe855f785302ef1
|
[
"MIT"
] | null | null | null |
import pandas as pd
from matplotlib.colors import LinearSegmentedColormap
# Dataset
data = pd.read_csv("./hr.csv")
entries = len(data)
bins = 10
# Data analysis
analysis = {
"bins": 10,
"balance_threshold": 0.1
}
# Plot labels
labels = ["satisfaction_level",
"average_montly_hours",
"last_evaluation",
"time_spend_company",
"number_project",
"Work_accident",
"left",
"promotion_last_5years",
"sales",
"salary"]
pretty_prints = ["Self-reported satisfaction",
"AVG Monthly hours",
"Time since last valuation, in years",
"Time in company, in years",
"Projects",
"Accidents",
"Left",
"Promoted (last 5 years)",
"Department",
"Salary"]
short_pretty_prints = ["Injuries",
"Work hours",
"Last evaluation",
"Left",
"Projects",
"Promotion",
"Wage",
"Satisfaction",
"Years in company",
"Dpt."]
departments_pretty_prints = ["Information Technology",
"R&D",
"Accounting",
"Human Resources",
"Management",
"Marketing",
"Product Management",
"Sales",
"Support",
"Technical"]
labels_pretty_print = {k: v for k, v in zip(labels, pretty_prints)}
short_labels_pretty_print = {k: v for k, v in zip(labels, short_pretty_prints)}
labels_pretty_print["salary_int"] = "Salary"
continuous_labels = labels[0:2]
discrete_labels = labels[2:5]
categorical_labels = labels[5:-1]
ordinal_labels = labels[-1:]
correlated_labels = continuous_labels + discrete_labels + ["salary_int"]
categorical_labels_pretty_prints = {
"Work_accident": ("Not Injured", "Injured"),
"left": ("Stayed", "Left"),
"promotion_last_5years": ("Not promoted", "Promoted"),
"sales": tuple(departments_pretty_prints)
}
ordinal_labels_pretty_prints = {
"salary": ("Low", "Medium", "High"),
}
ordered_ordinal_vars = {
"salary": ["low", "medium", "high"]
}
departments = set(data["sales"])
# Scatter plot
scatter = {
"sampling_size": 100, # size of each sample
"samples": 5, # number of samples to extract
"edge_bins": 1, # edge bins possibly containing outliers
"bins": 10,
"replace": True
}
clusetering_types = ["normal", "discrete", "raw"]
# Graphs
palette = {
"main": "#FE4365",
"complementary": "#FC9D9A",
"pr_complementary": "#F9CDAD",
"sc_complementary": "#C8C8A9",
"secondary": "#83AF9B"
}
round_palette = {
"main": palette["secondary"],
"secondary": palette["complementary"],
"pr_complementary": palette["sc_complementary"],
"sc_complementary": palette["secondary"]
}
large_palette = {
"navy": "#001f3f",
"blue": "#0074D9",
"green": "#2ECC40",
"olive": "#3D9970",
"orange": "#FF851B",
"yellow": "#FFDC00",
"red": "#FF4136",
"maroon": "#85144b",
"black": "#111111",
"grey": "#AAAAAA"
}
large_palette_full = {
"navy": "#001f3f",
"blue": "#0074D9",
"aqua": "#7FDBFF",
"teal": "#39CCCC",
"olive": "#3D9970",
"green": "#2ECC40",
"lime": "#01FF70",
"yellow": "#FFDC00",
"orange": "#FF851B",
"red": "#FF4136",
"maroon": "#85144b",
"fuchsia": "#F012BE",
"purple": "#B10DC9",
"black": "#111111",
"grey": "#AAAAAA",
"silver": "#DDDDDD"
}
large_palette_stacked = {
"navy": "#001f3f",
"blue": "#0074D9",
"olive": "#3D9970",
"orange": "#FF851B",
"green": "#2ECC40",
"yellow": "#FFDC00",
"red": "#FF4136",
"maroon": "#85144b",
"black": "#111111",
"grey": "#AAAAAA",
"stack": large_palette["orange"]
}
cmap_pale_pink = LinearSegmentedColormap.from_list("Pale pink",
[palette["pr_complementary"], palette["main"]],
N=1000000)
cmap_pale_pink_and_green = LinearSegmentedColormap.from_list("Pale pink&green",
[palette["main"],
palette["complementary"],
palette["pr_complementary"],
palette["sc_complementary"],
palette["secondary"]],
N=1000000)
| 30.018293
| 98
| 0.487508
|
f509ca15e0e12b426c5e187595364f7eea92a920
| 397
|
py
|
Python
|
GCD - Euclidean (Basic)/Python3/gcdEuclid.py
|
i-vishi/ds-and-algo
|
90a8635db9570eb17539201be29ec1cfd4b5ae18
|
[
"MIT"
] | 1
|
2021-03-01T04:15:08.000Z
|
2021-03-01T04:15:08.000Z
|
GCD - Euclidean (Basic)/Python3/gcdEuclid.py
|
i-vishi/ds-and-algo
|
90a8635db9570eb17539201be29ec1cfd4b5ae18
|
[
"MIT"
] | null | null | null |
GCD - Euclidean (Basic)/Python3/gcdEuclid.py
|
i-vishi/ds-and-algo
|
90a8635db9570eb17539201be29ec1cfd4b5ae18
|
[
"MIT"
] | null | null | null |
# Author: Vishal Gaur
# Created: 17-01-2021 20:31:34
# function to find GCD using Basic Euclidean Algorithm
# Driver Code to test above function
a = 14
b = 35
g = gcdEuclid(a, b)
print("GCD of", a, "&", b, "is: ", g)
a = 56
b = 125
g = gcdEuclid(a, b)
print("GCD of", a, "&", b, "is: ", g)
| 17.26087
| 54
| 0.566751
|
f50a95d4fbb66571658a68aa0a66854f9c5c4220
| 437
|
py
|
Python
|
src/my_package/todelete/modules/MotionSymmetryModule.py
|
laomao0/AIM_DAIN
|
8322569498d675d3b2c1f35475c1299cad580bde
|
[
"MIT"
] | 3
|
2020-05-08T20:45:57.000Z
|
2021-01-18T11:32:38.000Z
|
src/my_package/todelete/modules/MotionSymmetryModule.py
|
laomao0/AIM_DAIN
|
8322569498d675d3b2c1f35475c1299cad580bde
|
[
"MIT"
] | null | null | null |
src/my_package/todelete/modules/MotionSymmetryModule.py
|
laomao0/AIM_DAIN
|
8322569498d675d3b2c1f35475c1299cad580bde
|
[
"MIT"
] | null | null | null |
# modules/InterpolationLayer.py
from torch.nn import Module
from functions.MotionSymmetryLayer import MotionSymmetryLayer
| 29.133333
| 81
| 0.741419
|
f50adc78c47a350acb70a96ee1ecc3c1259e77a6
| 2,997
|
py
|
Python
|
steer.py
|
nlw0/pyPyrTools
|
91fc0932906054f6d43a32a205069aa25d884545
|
[
"MIT"
] | 1
|
2020-10-13T14:40:39.000Z
|
2020-10-13T14:40:39.000Z
|
steer.py
|
umeshraj/pyPyrTools
|
91fc0932906054f6d43a32a205069aa25d884545
|
[
"MIT"
] | null | null | null |
steer.py
|
umeshraj/pyPyrTools
|
91fc0932906054f6d43a32a205069aa25d884545
|
[
"MIT"
] | 1
|
2018-04-26T10:06:47.000Z
|
2018-04-26T10:06:47.000Z
|
import numpy
from steer2HarmMtx import steer2HarmMtx
def steer(*args):
    ''' Steer BASIS to the specified ANGLE.
function res = steer(basis,angle,harmonics,steermtx)
BASIS should be a matrix whose columns are vectorized rotated copies
of a steerable function, or the responses of a set of steerable filters.
ANGLE can be a scalar, or a column vector the size of the basis.
HARMONICS (optional, default is N even or odd low frequencies, as for
derivative filters) should be a list of harmonic numbers indicating
the angular harmonic content of the basis.
STEERMTX (optional, default assumes cosine phase harmonic components,
and filter positions at 2pi*n/N) should be a matrix which maps
the filters onto Fourier series components (ordered [cos0 cos1 sin1
cos2 sin2 ... sinN]). See steer2HarmMtx.m
Eero Simoncelli, 7/96. Ported to Python by Rob Young, 5/14. '''
if len(args) < 2:
print 'Error: input parameters basis and angle are required!'
return
basis = args[0]
num = basis.shape[1]
angle = args[1]
if isinstance(angle, (int, long, float)):
angle = numpy.array([angle])
else:
if angle.shape[0] != basis.shape[0] or angle.shape[1] != 1:
print 'ANGLE must be a scalar, or a column vector the size of the basis elements'
return
# If HARMONICS are not passed, assume derivatives.
if len(args) < 3:
if num%2 == 0:
harmonics = numpy.array(range(num/2))*2+1
else:
            # (the hard-coded 15 here was a typo for num, cf. the even branch)
            harmonics = numpy.array(range((num+1)/2))*2
else:
harmonics = args[2]
if len(harmonics.shape) == 1 or harmonics.shape[0] == 1:
# reshape to column matrix
harmonics = harmonics.reshape(harmonics.shape[0], 1)
elif harmonics.shape[0] != 1 and harmonics.shape[1] != 1:
        print('Error: input parameter HARMONICS must be 1D!')
return
if 2*harmonics.shape[0] - (harmonics == 0).sum() != num:
        print('harmonics list is incompatible with basis size!')
return
# If STEERMTX not passed, assume evenly distributed cosine-phase filters:
if len(args) < 4:
steermtx = steer2HarmMtx(harmonics,
numpy.pi*numpy.array(range(num))/num,
'even')
else:
steermtx = args[3]
steervect = numpy.zeros((angle.shape[0], num))
arg = angle * harmonics[numpy.nonzero(harmonics)[0]].T
if all(harmonics):
steervect[:, range(0,num,2)] = numpy.cos(arg)
steervect[:, range(1,num,2)] = numpy.sin(arg)
    else:
        # a zero harmonic contributes a constant (DC) column
        steervect[:, 0] = numpy.ones(arg.shape[0])
        steervect[:, range(1, num, 2)] = numpy.cos(arg)
        steervect[:, range(2, num, 2)] = numpy.sin(arg)
steervect = numpy.dot(steervect,steermtx)
    if steervect.shape[0] > 1:
        # one steering vector per basis row: combine them elementwise
        res = (basis * steervect).sum(axis=1)
else:
res = numpy.dot(basis, steervect.T)
return res
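# --- Editor's note: a minimal usage sketch, not part of the original file. ---
# For a two-filter derivative basis (filters at 0 and pi/2, default harmonics),
# steering reduces to f_theta = cos(theta)*f_0 + sin(theta)*f_{pi/2}; this
# assumes steer2HarmMtx behaves as in pyPyrTools.
if __name__ == '__main__':
    theta = numpy.pi / 4
    f0 = numpy.random.rand(16)    # response of the 0-degree filter (made up)
    f90 = numpy.random.rand(16)   # response of the 90-degree filter (made up)
    basis = numpy.column_stack([f0, f90])
    expected = numpy.cos(theta) * f0 + numpy.sin(theta) * f90
    print(numpy.allclose(steer(basis, theta).ravel(), expected))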
| 34.448276
| 93
| 0.621288
|
f50baf297616723fd430fcda467e665dcd88c479
| 2,057
|
py
|
Python
|
pydest/pydest.py
|
henworth/pydest
|
9537696c39f36f8250082891ddcc0198142d22eb
|
[
"MIT"
] | null | null | null |
pydest/pydest.py
|
henworth/pydest
|
9537696c39f36f8250082891ddcc0198142d22eb
|
[
"MIT"
] | null | null | null |
pydest/pydest.py
|
henworth/pydest
|
9537696c39f36f8250082891ddcc0198142d22eb
|
[
"MIT"
] | null | null | null |
import aiohttp
import asyncio
import os
import zipfile
from pydest.api import API
from pydest.manifest import Manifest
| 29.811594
| 96
| 0.619349
|
f50ea0f4e8fc7432c99f5e054323909249a31983
| 784
|
py
|
Python
|
vaccinate/core/migrations/0120_geography_fields.py
|
MoralCode/vial
|
cdaaab053a9cf1cef40104a2cdf480b7932d58f7
|
[
"MIT"
] | 7
|
2021-06-28T17:33:47.000Z
|
2022-02-12T21:54:59.000Z
|
vaccinate/core/migrations/0120_geography_fields.py
|
MoralCode/vial
|
cdaaab053a9cf1cef40104a2cdf480b7932d58f7
|
[
"MIT"
] | 104
|
2021-06-17T21:25:30.000Z
|
2022-03-28T14:21:57.000Z
|
vaccinate/core/migrations/0120_geography_fields.py
|
MoralCode/vial
|
cdaaab053a9cf1cef40104a2cdf480b7932d58f7
|
[
"MIT"
] | 1
|
2021-06-25T17:52:23.000Z
|
2021-06-25T17:52:23.000Z
|
# Generated by Django 3.2.1 on 2021-05-06 22:30
import django.contrib.gis.db.models.fields
from django.db import migrations
| 27.034483
| 68
| 0.58801
|
f50f1a90c240661a8974cc7923b38f46dce70bae
| 29,856
|
py
|
Python
|
views.py
|
milos-korenciak/2018.ossconf.sk
|
f121dde4f313a207e39c2f2e187bdad046b86592
|
[
"MIT"
] | 7
|
2017-07-16T05:59:07.000Z
|
2018-01-22T09:35:21.000Z
|
views.py
|
milos-korenciak/2018.ossconf.sk
|
f121dde4f313a207e39c2f2e187bdad046b86592
|
[
"MIT"
] | 17
|
2017-07-31T20:35:24.000Z
|
2018-02-26T22:00:12.000Z
|
views.py
|
milos-korenciak/2018.ossconf.sk
|
f121dde4f313a207e39c2f2e187bdad046b86592
|
[
"MIT"
] | 13
|
2017-08-01T17:03:40.000Z
|
2021-11-02T13:24:30.000Z
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import os
import re
import textwrap
import requests
import unicodedata
from datetime import datetime, timedelta
from flask import Flask, g, request, render_template, abort, make_response
from flask_babel import Babel, gettext
from jinja2 import evalcontextfilter, Markup
app = Flask(__name__, static_url_path='/static')
app.config['BABEL_DEFAULT_LOCALE'] = 'sk'
app.jinja_options = {'extensions': ['jinja2.ext.with_', 'jinja2.ext.i18n']}
babel = Babel(app)
EVENT = gettext('PyCon SK 2018')
DOMAIN = 'https://2018.pycon.sk'
API_DOMAIN = 'https://api.pycon.sk'
LANGS = ('en', 'sk')
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
NOW = datetime.utcnow().strftime(TIME_FORMAT)
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
LOGO_PYCON = 'logo/pycon_logo_square.svg'
LDJSON_SPY = {
"@type": "Organization",
"name": "SPy o. z.",
"url": "https://spy.pycon.sk",
"logo": "https://spy.pycon.sk/img/logo/spy-logo.png",
"sameAs": [
"https://facebook.com/pyconsk",
"https://twitter.com/pyconsk",
"https://www.linkedin.com/company/spy-o--z-",
"https://github.com/pyconsk",
]
}
LDJSON_PYCON = {
"@context": "http://schema.org",
"@type": "Event",
"name": EVENT,
"description": gettext("PyCon will be back at Slovakia in 2018 again. PyCon SK is a community-organized conference "
"for the Python programming language."),
"startDate": "2018-03-09T9:00:00+01:00",
"endDate": "2018-03-11T18:00:00+01:00",
"image": DOMAIN + "/static/img/logo/pycon_long_2018.png",
"location": {
"@type": "Place",
"name": "FIIT STU",
"address": {
"@type": "PostalAddress",
"streetAddress": "Ilkoviova 2",
"addressLocality": "Bratislava 4",
"postalCode": "842 16",
"addressCountry": gettext("Slovak Republic")
},
},
"url": DOMAIN,
"workPerformed": {
"@type": "CreativeWork",
"name": EVENT,
"creator": LDJSON_SPY
}
}
# calendar settings
ICAL_LEN = 70 # length of a calendar (ical) line
ICAL_NL = '\\n\n' # calendar newline
IGNORE_TALKS = ['Break', 'Coffee Break']
TYPE = {
'talk': gettext('Talk'),
'workshop': gettext('Workshop'),
}
TAGS = {
'ai': gettext('Machine Learning / AI'),
'community': gettext('Community / Diversity / Social'),
'data': gettext('Data Science'),
'devops': 'DevOps',
'docs': gettext('Documentation'),
'edu': gettext('Education'),
'generic': gettext('Python General'),
'security': gettext('Security'),
'softskills': gettext('Soft Skills'),
'hardware': gettext('Hardware'),
'web': gettext('Web Development'),
'other': gettext('Other'),
}
FRIDAY_START = datetime(2018, 3, 9, hour=9)
SATURDAY_START = datetime(2018, 3, 10, hour=9)
SUNDAY_START = datetime(2018, 3, 11, hour=10, minute=15)
FRIDAY_TRACK1 = (
{"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
{"pause": 15, 'title': gettext("FaaS and Furious - Zero to Serverless in 60 seconds - Anywhere")},
{"pause": 15, 'title': gettext("Docs or it didn't happen")},
{"pause": 5, 'title': gettext("GraphQL is the new black")},
{"pause": 60, 'title': gettext("To the Google in 80 Days")},
{"pause": 5, 'title': gettext("Unsafe at Any Speed")},
{"pause": 15, 'title': gettext("Protecting Privacy and Security For Yourself and Your Community")},
{"pause": 5, 'title': gettext("ZODB: The Graph database for Python Developers.")},
{"pause": 15, 'title': gettext("Differentiable programming in Python and Gluon for (not only medical) image analysis")},
{"pause": 5, 'title': gettext("Vim your Python, Python your Vim")},
)
FRIDAY_TRACK2 = (
{"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
{"pause": 5, 'title': gettext("Python Days in Martin and follow-up activities")},
{"pause": 15, 'title': gettext("Python programming till graduation")},
{"pause": 5, 'title': gettext("Open educational resources for learning Python")},
{"pause": 60, 'title': gettext("About Ninjas and Mentors: CoderDojo in Slovakia")},
{"pause": 5, 'title': gettext("Community based courses")},
{"pause": 15, 'title': gettext("How do we struggle with Python in Martin?")},
{"pause": 5, 'title': gettext("Why hardware attracts kids and adults to IT")},
{"pause": 5, 'title': gettext("Panel discussion: Teaching IT in Slovakia - where is it heading?")},
{"pause": 5, 'title': gettext("EDU Talks"), 'duration': 30, 'language': 'SK', 'flag': 'edu', 'type': 'talk'},
)
FRIDAY_WORKSHOPS1 = (
{"pause": 10, 'title': gettext("How to create interactive maps in Python / R")},
{"pause": 60, 'title': gettext("Working with XML")},
{"pause": 5, 'title': gettext("Managing high-available applications in production")},
)
FRIDAY_WORKSHOPS2 = (
{"pause": 40, 'title': gettext("Workshop: An Introduction to Ansible")},
{"pause": 5, 'title': gettext("Introduction to Machine Learning with Python")},
)
FRIDAY_HALLWAY = (
{"pause": 0, 'title': gettext("OpenPGP key-signing party"), 'duration': 30, 'link': 'https://github.com/pyconsk/2018.pycon.sk/tree/master/openpgp-key-signing-party', 'flag': 'security'},
)
SATURDAY_TRACK1 = (
{"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
{"pause": 5, 'title': gettext("Solutions Reviews")},
{"pause": 15, 'title': gettext("Campaign Automation & Abusing Celery Properly")},
{"pause": 5, 'title': gettext("The Truth about Mastering Big Data")},
{"pause": 5, 'title': gettext("Industrial Machine Learning: Building scalable distributed machine learning pipelines with Python")},
{"pause": 25, 'title': gettext("Programming contest Semi finale"), 'duration': 30, 'flag': 'other', 'link': 'https://app.pycon.sk'},
{"pause": 5, 'title': gettext("Pythonic code, by example")},
{"pause": 15, 'title': gettext("Our DevOps journey, is SRE the next stop?")},
{"pause": 5, 'title': gettext("Implementing distributed systems with Consul")},
{"pause": 15, 'title': gettext("Designing fast and scalable Python MicroServices with django")},
{"pause": 5, 'title': gettext("When your wetware has too many threads - Tips from an ADHDer on how to improve your focus")},
{"pause": 5, 'title': gettext("Programming Python as performance: live coding with FoxDot")},
{"pause": 5, 'title': gettext("Programming Contest Grand Finale"), 'duration': 30, 'flag': 'other', 'type': 'talk', 'language': 'EN'},
{"pause": 5, 'title': gettext("Lightning Talks"), 'duration': 45, 'flag': 'other', 'type': 'talk'},
)
SATURDAY_TRACK2 = (
{"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
{"pause": 5, 'title': gettext("Meteo data in Python. Effectively.")},
{"pause": 15, 'title': gettext("Around the World in 30 minutes")},
{"pause": 5, 'title': gettext("LOCKED SHIELDS: What a good cyber testing looks like")},
{"pause": 60, 'title': gettext("Kiwi.com in ZOO")},
{"pause": 5, 'title': gettext("Keynote in Kiwi.com Hall"), 'duration': 30, 'flag': 'generic', 'type': 'talk'},
{"pause": 15, 'title': gettext("Skynet your Infrastructure with QUADS")},
{"pause": 5, 'title': gettext("Automated network OS testing")},
{"pause": 15, 'title': gettext("Tools to interact with Bitcoin and Ethereum")},
{"pause": 5, 'title': gettext("7 Steps to a Clean Issue Tracker")},
{"pause": 5, 'title': gettext("The Concierge Paradigm")},
)
SATURDAY_WORKSHOPS1 = (
{"pause": 55, 'title': gettext("Effectively running python applications in Kubernetes/OpenShift")},
{"pause": 5, 'title': gettext("Roboworkshop")},
)
SATURDAY_WORKSHOPS2 = (
{"pause": 55, 'title': gettext("Microbit:Slovakia")},
{"pause": 5, 'title': gettext("Coding in Python: A high-school programming lesson")},
)
SATURDAY_HALLWAY1 = (
{"pause": 0, 'title': gettext("Pandas documentation sprint"), 'duration': 360, 'link': 'https://python-sprints.github.io/pandas/', 'flag': 'docs'},
)
SATURDAY_HALLWAY2 = (
{"pause": 145, 'title': gettext("Programming contest"), 'duration': 95, 'flag': 'other', 'link': 'https://app.pycon.sk'},
{"pause": 5, 'title': gettext("Conference organizers meetup"), 'duration': 30, 'flag': 'community'},
)
SUNDAY_TRACK1 = (
{"pause": 5, 'title': gettext("Charon and the way out from a pickle hell")},
{"pause": 15, 'title': gettext("Making Python Behave")},
{"pause": 5, 'title': gettext("Secret information about the code we write")},
{"pause": 60, 'title': gettext("How to connect objects with each other in different situations with Pythonic ways - association, aggregation, composition and etc.")},
{"pause": 5, 'title': gettext("APIs: Gateway to world's data")},
{"pause": 15, 'title': gettext("Getting started with HDF5 and PyTables")},
{"pause": 5, 'title': gettext("Real-time personalized recommendations using embeddings")},
{"pause": 5, 'title': gettext("Quiz"), 'duration': 30, 'flag': 'other', 'type': 'talk'},
)
SUNDAY_WORKSHOPS1 = (
{"pause": 40, 'title': gettext("Real-time transcription and sentiment analysis of audio streams; on the phone and in the browser")},
{"pause": 5, 'title': gettext("Learn MongoDB by modeling PyPI in a document database")},
)
SUNDAY_WORKSHOPS2 = (
{"pause": 15, 'title': gettext("Testing Essentials for Scientists and Engineers")},
{"pause": 5, 'title': gettext("Cython: Speed up your code without going insane")},
)
SUNDAY_WORKSHOPS3 = (
{"pause": 15, 'title': gettext("Meet the pandas")},
{"pause": 5, 'title': gettext("Serverless with OpenFaaS and Python")},
)
SUNDAY_WORKSHOPS4 = (
{"pause": 5, 'title': gettext("Django Girls"), 'duration': 540, 'flag': 'web', 'type': 'workshop'},
)
SUNDAY_HALLWAY = (
{"pause": 5, 'title': gettext("Documentation clinic/helpdesk")},
)
AULA1 = {
'name': gettext('Kiwi.com Hall'),
'number': '-1.61',
}
AULA2 = {
'name': gettext('Python Software Foundation Hall'),
'number': '-1.65',
}
AULA3 = {
'name': gettext('SPy - Hall A'),
'number': '-1.57',
}
AULA4 = {
'name': gettext('SPy - Hall B'),
'number': '-1.57',
}
AULA5 = {
'name': gettext('Django Girls Auditorium'),
'number': '+1.31',
}
HALLWAY = {
'name': gettext('Hallway'),
'number': '',
}
def get_conference_data(url='', filters=''):
"""Connect to API and get public talks and speakers data."""
url = API_DOMAIN + url
if filters:
url = url + '&' + filters
r = requests.get(url)
return r.json()
API_DATA_SPEAKERS = get_conference_data(url='/event/2018/speakers/')
API_DATA_TALKS = get_conference_data(url='/event/2018/talks/')
def _get_template_variables(**kwargs):
"""Collect variables for template that repeats, e.g. are in body.html template"""
lang = get_locale()
variables = {
'title': EVENT,
'logo': LOGO_PYCON, # TODO: Do we need this?
'ld_json': LDJSON_PYCON
}
variables['ld_json']['url'] = DOMAIN + '/' + lang + '/'
variables.update(kwargs)
if 'current_lang' in g:
variables['lang_code'] = g.current_lang
else:
variables['lang_code'] = app.config['BABEL_DEFAULT_LOCALE']
return variables
def generate_track(api_data, track_data, start, flag=None):
"""Helper function to mix'n'match API data, with schedule order defined here, to generate schedule dict"""
template_track_data = []
for talk in track_data:
# Check if talk is in API
talk_api_data = next((item for item in api_data if item['title'] == talk['title']), None)
# If talk is not in API data we'll use text from track_data dict == same structure for template generation
if not talk_api_data:
talk_api_data = talk
if not flag or ('flag' in talk_api_data and flag == talk_api_data['flag']):
# Store data to be displayed in template
template_track_data.append({
"start": start,
"talk": talk_api_data
})
start = start + timedelta(minutes=talk_api_data.get('duration', 0))
# start = start + timedelta(minutes=talk_api_data['duration'])
if not flag:
# Generate break
break_name = gettext('Break')
if talk['pause'] in (40, 60):
break_name = gettext('Lunch ')
if talk['pause'] in (15, 20):
break_name = gettext('Coffee Break ')
template_track_data.append({
'start': start,
'talk': {'title': break_name},
'css': 'break'
})
            start = start + timedelta(minutes=talk['pause'])  # break length is not from the API; it is always defined in the track
return template_track_data
def generate_schedule(api_data, flag=None):
return [
{
'room': AULA1,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_TRACK1, FRIDAY_START, flag=flag),
'day': 'friday',
'block_start': True,
},
{
'room': AULA2,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_TRACK2, FRIDAY_START, flag=flag),
'day': 'friday'
},
{
'room': AULA3,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_WORKSHOPS1, FRIDAY_START+timedelta(minutes=30), flag=flag),
'day': 'friday'
},
{
'room': AULA4,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_WORKSHOPS2, FRIDAY_START+timedelta(minutes=30), flag=flag),
'day': 'friday',
},
{
'room': HALLWAY,
'start': FRIDAY_START+timedelta(minutes=395),
'schedule': generate_track(api_data, FRIDAY_HALLWAY, FRIDAY_START+timedelta(minutes=395), flag=flag),
'day': 'saturday',
'block_end': True,
},
{
'room': AULA1,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_TRACK1, SATURDAY_START, flag=flag),
'day': 'saturday',
'block_start': True,
},
{
'room': AULA2,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_TRACK2, SATURDAY_START, flag=flag),
'day': 'saturday'
},
{
'room': AULA3,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_WORKSHOPS1, SATURDAY_START+timedelta(minutes=30), flag=flag),
'day': 'saturday'
},
{
'room': AULA4,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_WORKSHOPS2, SATURDAY_START+timedelta(minutes=30), flag=flag),
'day': 'saturday'
},
{
'room': HALLWAY,
'start': SATURDAY_START+timedelta(minutes=60),
'schedule': generate_track(api_data, SATURDAY_HALLWAY1, SATURDAY_START+timedelta(minutes=60), flag=flag),
'day': 'saturday',
},
{
'room': HALLWAY,
'start': SATURDAY_START+timedelta(minutes=30),
'schedule': generate_track(api_data, SATURDAY_HALLWAY2, SATURDAY_START+timedelta(minutes=30), flag=flag),
'day': 'saturday',
'block_end': True,
},
{
'room': AULA1,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_TRACK1, SUNDAY_START, flag=flag),
'day': 'sunday',
'block_start': True,
},
{
'room': AULA2,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS1, SUNDAY_START, flag=flag),
'day': 'sunday'
},
{
'room': AULA3,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS2, SUNDAY_START, flag=flag),
'day': 'sunday'
},
{
'room': AULA4,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS3, SUNDAY_START, flag=flag),
'day': 'sunday'
},
{
'room': AULA5,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS4, SUNDAY_START-timedelta(minutes=135), flag=flag),
'day': 'sunday',
},
{
'room': HALLWAY,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_HALLWAY, SUNDAY_START+timedelta(minutes=45), flag=flag),
'day': 'sunday',
'block_end': True,
},
]
def _timestamp(dt=None):
if dt is None:
dt = datetime.now()
fmt = '%Y%m%dT%H%M%S'
return dt.strftime(fmt)
def _ignore_talk(title, names=IGNORE_TALKS):
# yes, we can paste unicode symbols, but if we change the symbol this test will still work
max_appended_symbols = 2
return any((title == name or title[:-(_len+1)] == name)
for _len in range(max_appended_symbols) for name in names)
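# Editor's example, not part of the original file: a title matches an ignored
# name even with up to two trailing symbols appended, e.g.
#   _ignore_talk('Break') -> True
#   _ignore_talk('Coffee Break x') -> True   (two trailing characters stripped)
#   _ignore_talk('Keynote') -> False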
def _hash_event(track, slot):
room = track.get('room')
name = room.get('name')
ts = _timestamp(slot.get('start'))
_hash = str(hash('{name}:{ts}'.format(name=name, ts=ts)))
_hash = _hash.replace('-', '*')
return '-'.join(_hash[i*5:(i+1)*5] for i in range(4))
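# Editor's note, not part of the original file: Python 3 randomizes str hashes
# per interpreter run, so these event UIDs are only stable within one process
# (or with a fixed PYTHONHASHSEED).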
def _normalize(text, tag=None, subsequent_indent=' ', **kwargs):
    # tag must always be included to determine the amount of space left in the first line
if tag:
max_width = ICAL_LEN - len(tag) - 1
else:
max_width = ICAL_LEN
text = text.strip().replace('\n', ICAL_NL)
return '\n'.join(textwrap.wrap(text, width=max_width, subsequent_indent=subsequent_indent, **kwargs))
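# --- Editor's example, not part of the original file: folding a made-up
# description so that "DESCRIPTION:" plus the first chunk fits in ICAL_LEN
# columns and continuation lines start with a space, as RFC 5545 line
# folding expects.
def _normalize_demo():
    text = 'A community-organized conference for the Python programming language. ' * 3
    return 'DESCRIPTION:' + _normalize(text, tag='DESCRIPTION')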
# CALENDAR FUNCTIONS
def get_mtime(filename):
"""Get last modification time from file"""
mtime = datetime.fromtimestamp(os.path.getmtime(filename))
return mtime.strftime(TIME_FORMAT)
SITEMAP_DEFAULT = {'prio': '0.1', 'freq': 'weekly'}
SITEMAP = {
'sitemap.xml': {'prio': '0.9', 'freq': 'daily', 'lastmod': get_mtime(__file__)},
'index.html': {'prio': '1', 'freq': 'daily'},
'schedule.html': {'prio': '0.9', 'freq': 'daily'},
'speakers.html': {'prio': '0.9', 'freq': 'daily'},
'hall_of_fame.html': {'prio': '0.5', 'freq': 'weekly'},
'tickets.html': {'prio': '0.5', 'freq': 'weekly'},
}
def get_lastmod(route, sitemap_entry):
"""Used by sitemap() below"""
if 'lastmod' in sitemap_entry:
return sitemap_entry['lastmod']
template = route.rule.split('/')[-1]
template_file = os.path.join(SRC_DIR, 'templates', template)
if os.path.exists(template_file):
return get_mtime(template_file)
return NOW
if __name__ == "__main__":
app.run(debug=True, host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=int(os.environ.get('FLASK_PORT', 5000)),
use_reloader=True)
| 36.81381
| 190
| 0.612775
|
f50f7d07d1b11d4dc8dcf82534a3b3e6a3a87158
| 2,891
|
py
|
Python
|
ietf/community/migrations/0002_auto_20141222_1749.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2021-11-20T03:40:40.000Z
|
2021-11-20T03:40:42.000Z
|
ietf/community/migrations/0002_auto_20141222_1749.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ietf/community/migrations/0002_auto_20141222_1749.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| 32.483146
| 88
| 0.570391
|
f510f358811538f9c09860ccdb42030579e71a1a
| 928
|
py
|
Python
|
scripts/fishvalidate.py
|
justinbois/fishactivity
|
6c6ac06c391b75b2725e2e2a61dd80afc34daf31
|
[
"MIT"
] | null | null | null |
scripts/fishvalidate.py
|
justinbois/fishactivity
|
6c6ac06c391b75b2725e2e2a61dd80afc34daf31
|
[
"MIT"
] | null | null | null |
scripts/fishvalidate.py
|
justinbois/fishactivity
|
6c6ac06c391b75b2725e2e2a61dd80afc34daf31
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import fishact
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Validate data files.')
parser.add_argument('activity_fname', metavar='activity_file', type=str,
help='Name of activity file.')
parser.add_argument('gtype_fname', metavar='genotype_file', type=str,
help='Name of genotype file.')
args = parser.parse_args()
print('------------------------------------------------')
print('Checking genotype file...')
fishact.validate.test_genotype_file(args.gtype_fname)
print('------------------------------------------------\n\n\n')
print('------------------------------------------------')
print('Checking activity file...')
fishact.validate.test_activity_file(args.activity_fname, args.gtype_fname)
print('------------------------------------------------')
| 37.12
| 78
| 0.519397
|
f513b5c28a4eaca8eb08a50fccfcd5204171dfdc
| 1,682
|
py
|
Python
|
scripts/rescale.py
|
danydoerr/spp_dcj
|
1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a
|
[
"MIT"
] | 2
|
2021-08-24T16:03:30.000Z
|
2022-03-18T14:52:43.000Z
|
scripts/rescale.py
|
danydoerr/spp_dcj
|
1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a
|
[
"MIT"
] | null | null | null |
scripts/rescale.py
|
danydoerr/spp_dcj
|
1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from sys import stdout,stderr,exit
from optparse import OptionParser
from newick_parser import parse_tree_iterator, Branch
from tree_span import calculateSpan
from copy import deepcopy


# Editor's sketch, not from the original repo: the rescale helpers are missing
# from this dump. The node attributes used below (`length`, `subtrees`) are
# assumptions about the newick_parser tree API, not confirmed by the source.
def rescale(node, factor):
    # Multiply every branch length in the tree by a constant factor.
    if getattr(node, 'length', None) is not None:
        node.length *= factor
    for child in getattr(node, 'subtrees', []):
        rescale(child, factor)
    return node


def rescale_absolute(tree, absolute):
    # Scale the tree so that its maximal distance equals `absolute`.
    return rescale(tree, absolute / calculateSpan(tree))
if __name__ == '__main__':
usage = 'usage: %prog [options] <NEWICK FILE>'
parser = OptionParser(usage=usage)
parser.add_option('-s', '--scale_factor', dest='scale_factor',
help='Scale factor of distances in tree',
type=float, default=0, metavar='FLOAT')
parser.add_option('-a', '--absolute_length', dest='absolute',
help='Absolute length of maximal distance in tree',
type=float, default=0, metavar='FLOAT')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit(1)
if not ((options.absolute > 0) ^ (options.scale_factor > 0)):
print('!! Specify either scale factor or absolute length with ' + \
'strictly positive number', file = stderr)
exit(1)
for tree in parse_tree_iterator(open(args[0])):
if options.absolute > 0:
print(rescale_absolute(tree, options.absolute), file = stdout)
else:
print(rescale(tree, options.scale_factor), file = stdout)
| 29
| 75
| 0.633175
|
f51915e704bb43425413f02d24086079a01a04be
| 743
|
py
|
Python
|
mytest/playsnake.py
|
mrzhuzhe/stable-baselines3
|
6c3bc5fa4c3faba951099e3ccb5c74b763134b38
|
[
"MIT"
] | null | null | null |
mytest/playsnake.py
|
mrzhuzhe/stable-baselines3
|
6c3bc5fa4c3faba951099e3ccb5c74b763134b38
|
[
"MIT"
] | null | null | null |
mytest/playsnake.py
|
mrzhuzhe/stable-baselines3
|
6c3bc5fa4c3faba951099e3ccb5c74b763134b38
|
[
"MIT"
] | null | null | null |
from stable_baselines3 import PPO
import os
from setup_gym_env import SnakeEnv
import time
#models_dir = "./models/1644408901/" + "40000"
#models_dir = "./models/1644462865/" + "120000"
#models_dir = "./models/1644466638/" + "100000"
models_dir = "./models/1644485414/" + "100000"
env = SnakeEnv()
env.reset()
model = PPO.load(models_dir)
episodes = 10
# the snake doesn't know where it is
for episode in range(episodes):
done = False
obs = env.reset()
#while True:#not done:
while not done:
action, _states = model.predict(obs)
#print("action",action)
obs, reward, done, info = env.step(action)
#print('reward',reward)
        if done:
print(done)
env.render()
| 22.515152
| 50
| 0.641992
|
f519a4dd8609848cb4fec6b2221b463e32b9ae3b
| 13,105
|
py
|
Python
|
yoda2h5.py
|
iamholger/yodf5
|
79ad8d77fd2b48e1b71403339e2502b42a5435c8
|
[
"MIT"
] | 4
|
2020-04-22T11:00:13.000Z
|
2020-12-16T17:49:47.000Z
|
yoda2h5.py
|
iamholger/yodf5
|
79ad8d77fd2b48e1b71403339e2502b42a5435c8
|
[
"MIT"
] | 4
|
2020-12-17T16:26:16.000Z
|
2020-12-17T16:30:34.000Z
|
yoda2h5.py
|
iamholger/yodf5
|
79ad8d77fd2b48e1b71403339e2502b42a5435c8
|
[
"MIT"
] | 2
|
2020-05-06T17:30:05.000Z
|
2020-12-16T17:58:23.000Z
|
#!/usr/bin/env python3
import yoda, sys
import h5py
import numpy as np
def createDatasets(f, binids, variations, depth=1, compression=4):
"""
Create data sets in the HDF5 file.
"""
nbins=len(binids)
nvars=len(variations)
    # The fundamental moments/elements of yoda objects
floats = [
"sumw",
"sumw2",
"sumwx",
"sumwx2",
"sumwy",
"sumwy2",
"sumwxy",
"numEntries",
"xval",
"xerr-",
"xerr+",
"yval",
"yerr-",
"yerr+",
"xmin",
"xmax",
"ymin",
"ymax"
]
# The datasets have 3 axes: binid, weight variation, point in parameter space
for df in floats: f.create_dataset(df, (nbins,nvars,depth), maxshape=(None,None,None), dtype='f' , chunks=True, compression=compression)
    # Lookups --- these help when reading data and reconstructing YODA objects
f.create_group("Histo1D")
f.create_group("Histo2D")
f.create_group("Profile1D")
f.create_group("Counter")
f.create_group("Scatter1D")
f.create_group("Scatter2D")
# This is the one that works well with hdf5 when reading std::string in C++
dt = h5py.special_dtype(vlen=str)
# We use these simple lists as lookup tables to associate the elements of the datasets ^^^ with
# the actual YODA Analysis objects
f.create_dataset("binids", data=np.array(binids, dtype=dt))
f.create_dataset("variations", data=np.array(variations, dtype=dt))
def dbn1ToArray(dbn):
"""
    The try/except block handles underflow/overflow distributions, which have no xMin/xMax.
"""
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries(), dbn.xMin(), dbn.xMax()])
    except Exception:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries(), 0, 0])
def H2dbn2ToArray(dbn):
"""
    The try/except block handles underflow/overflow distributions, which have no xMin/xMax.
"""
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries(), dbn.xMin(), dbn.xMax(), dbn.yMin(), dbn.yMax()])
    except Exception:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries(), 0, 0, 0, 0])
if __name__=="__main__":
import sys
import optparse, os, sys
op = optparse.OptionParser(usage=__doc__)
op.add_option("-v", "--debug", dest="DEBUG", action="store_true", default=False, help="Turn on some debug messages")
op.add_option("-o", dest="OUTPUT", default="analysisobjects.h5", help="Output HDF5 file (default: %default)")
opts, args = op.parse_args()
YODAFILES = args
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
binids, VVV, aix, aix_flat, central = None, None, None, None, None
if rank==0:
# TODO if len(args)==1 and os.path.isdir(args[0]) --- hierarchical reading with pnames finding etc
# Let's assume they are all consistent TODO add robustness
DATA0 = yoda.readYODA(args[0])
L = sorted(list(DATA0.keys()))
names = [x for x in L ]# if not "/RAW" in x]
central = [x for x in names if not x.endswith("]")]
variations = [x for x in names if x.endswith("]")]
# TODO In principle one probably should check that all variations are always the
# same, we assume this is the case here
var = []
for c in central:
var.append([x for x in variations if x.startswith(c+"[")])
        ## That's the weight and weight variation order we store the data in
VVV = ["CentralWeight"]
import re
p=re.compile("\[(.*?)\]")
for x in var[0]:
try:
VVV.append(p.findall(x)[0])
except Exception as e:
print(x, e)
binids = mkBinids(DATA0)
# Hierarchical, i.e. top layer is the AnalysisObject type
aix = mkIndexDict(DATA0, binids)
# Object name as keys and lists of indices as values
aix_flat = {}
for k, v in aix.items(): aix_flat.update(v)
binids = comm.bcast(binids, root=0)
VVV = comm.bcast(VVV, root=0)
aix = comm.bcast(aix, root=0)
aix_flat = comm.bcast(aix_flat, root=0)
central = comm.bcast(central, root=0)
    # NOTE dataset operations are collective
    # This requires h5py to use an HDF5 that is built with MPI
try:
f = h5py.File(opts.OUTPUT, "w", driver='mpio', comm=MPI.COMM_WORLD)
except:
f = h5py.File(opts.OUTPUT, "w")
createDatasets(f, binids, VVV, depth=len(YODAFILES))
createIndexDS(f, aix)
rankwork = chunkIt([i for i in range(len(YODAFILES))], size) if rank==0 else None
rankwork = comm.scatter(rankwork, root=0)
# This part is MPI trivial
for num, findex in enumerate(rankwork):
DATA = yoda.readYODA(YODAFILES[findex])
for hname in central:
_hname=mkSafeHname(hname)
fillDatasets(f, aix_flat[_hname], VVV, DATA, hname, depth=findex)
if rank==0:
print("[{}] --- {}/{} complete".format(rank, num, len(rankwork)))
sys.stdout.flush()
f.close()
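# --- Editor's note: a minimal read-back sketch, not part of the original file.
# Each moment is stored as an (nbins, nvariations, nfiles) array, with
# "binids" and "variations" as lookup tables; this assumes a file produced by
# the script above and h5py >= 3 for .asstr().
#
#   with h5py.File("analysisobjects.h5", "r") as fin:
#       binids = list(fin["binids"].asstr()[:])
#       variations = list(fin["variations"].asstr()[:])
#       j = variations.index("CentralWeight")
#       sumw = fin["sumw"][0, j, :]   # first bin, one value per input YODA file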
| 37.766571
| 184
| 0.546814
|
f51c993aef58b3c9c160f8b68cd78fc8daf5ff42
| 1,703
|
py
|
Python
|
main.py
|
RichezA/UnRecurZipper
|
dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1
|
[
"MIT"
] | null | null | null |
main.py
|
RichezA/UnRecurZipper
|
dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1
|
[
"MIT"
] | null | null | null |
main.py
|
RichezA/UnRecurZipper
|
dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1
|
[
"MIT"
] | null | null | null |
import zipfile
import os
import glob
import sys
# Actual directory that we could find somewhere
# Reading the first arg written in the console (program name not included)
fileTest = Folder(sys.argv[1])
| 47.305556
| 116
| 0.593658
|
f51f1a4cfc64468547a1bf70b97687f67b00823c
| 634
|
py
|
Python
|
dagger/dag_creator/airflow/utils/operator_factories.py
|
jorgetagle/dagger
|
dafcfb9df904e512f050aefdacf6581c571bac23
|
[
"MIT"
] | null | null | null |
dagger/dag_creator/airflow/utils/operator_factories.py
|
jorgetagle/dagger
|
dafcfb9df904e512f050aefdacf6581c571bac23
|
[
"MIT"
] | null | null | null |
dagger/dag_creator/airflow/utils/operator_factories.py
|
jorgetagle/dagger
|
dafcfb9df904e512f050aefdacf6581c571bac23
|
[
"MIT"
] | null | null | null |
from functools import partial
from airflow.operators.python_operator import ShortCircuitOperator
| 25.36
| 84
| 0.747634
|
f51fe70db140c3154b176531ad8f28b9ef267b5a
| 1,974
|
py
|
Python
|
predict_CNN.py
|
slimtomatillo/toxic_waste_dump
|
4bc820f0b31f4420e789af11a9338c475c068889
|
[
"MIT"
] | 2
|
2018-07-13T16:44:24.000Z
|
2019-10-14T21:31:02.000Z
|
predict_CNN.py
|
slimtomatillo/toxic_waste_dump
|
4bc820f0b31f4420e789af11a9338c475c068889
|
[
"MIT"
] | null | null | null |
predict_CNN.py
|
slimtomatillo/toxic_waste_dump
|
4bc820f0b31f4420e789af11a9338c475c068889
|
[
"MIT"
] | null | null | null |
# Imports
import pandas as pd
import pickle
from keras.models import load_model
from preprocess import preprocess
from preprocess import prep_text
#Logging
import logging
logging.getLogger().setLevel(logging.INFO)
logging.info('Loading comments to classify...')
# Enter comment to be classified below
comment_to_classify = ''
def return_label(predicted_probs):
"""
Function that takes in a list of 7 class
probabilities and returns the labels
with probabilities over a certain threshold.
"""
threshold = 0.4
labels = []
classes = ['clean', 'toxic', 'severe toxic', 'obscene',
'threat', 'insult', 'identity hate']
i = 0
while i < len(classes):
if predicted_probs[i] > threshold:
labels.append(classes[i])
i += 1
    return labels
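
# --- Editor's example, not part of the original file: with the 0.4 threshold
# above, made-up probabilities map to labels like this.
assert return_label([0.9, 0.55, 0.1, 0.41, 0.0, 0.2, 0.3]) == ['clean', 'toxic', 'obscene']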
def predict_label(comment_str):
"""
Function that takes in a comment in
string form and returns the predicted
class labels: not toxic, toxic, severe
toxic, obscene, threat, insults, identity
hate. May output multiple labels.
"""
data = pd.DataFrame(data=[comment_str], columns=['comment_text'])
logging.info('Comments loaded.')
# Preprocess text
X_to_predict = preprocess(data)
# Identify data to make predictions from
X_to_predict = X_to_predict['model_text']
# Format data properly
X_to_predict = prep_text(X_to_predict)
logging.info('Loading model...')
# Load CNN from disk
cnn = load_model('model/CNN/binarycrossentropy_adam/model-04-0.9781.hdf5')
logging.info('Model loaded.')
logging.info('Making prediction(s)...')
# Make predictions
preds = cnn.predict(X_to_predict)
for each_comment, prob in zip(data['comment_text'], preds):
print('COMMENT:')
print(each_comment)
print()
print('PREDICTION:')
print(return_label(prob))
print()
logging.info('Finished.')
predict_label(comment_to_classify)
| 24.675
| 78
| 0.670719
|
f523394a40a39dc77c57c643a75603c33eb11af0
| 176
|
py
|
Python
|
lib/models/backbones/efficientdet/__init__.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 245
|
2019-11-29T02:55:25.000Z
|
2022-03-30T07:30:18.000Z
|
lib/models/backbones/efficientdet/__init__.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 24
|
2019-11-29T10:05:00.000Z
|
2022-03-30T07:16:06.000Z
|
lib/models/backbones/efficientdet/__init__.py
|
FishLiuabc/centerpose
|
555d753cd82693476f91f78c53aa4147f5a83015
|
[
"MIT"
] | 45
|
2019-11-29T05:12:02.000Z
|
2022-03-21T02:20:36.000Z
|
from .efficientdet import EfficientDet
| 25.142857
| 78
| 0.8125
|
f528bf891d405b1631574286911aea9a15dea4b2
| 1,566
|
py
|
Python
|
codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py
|
codesmith-gmbh/forge
|
43c334d829a727b48f8e21e273017c51394010f9
|
[
"Apache-2.0"
] | null | null | null |
codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py
|
codesmith-gmbh/forge
|
43c334d829a727b48f8e21e273017c51394010f9
|
[
"Apache-2.0"
] | null | null | null |
codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py
|
codesmith-gmbh/forge
|
43c334d829a727b48f8e21e273017c51394010f9
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import boto3
from box import Box
from crhelper import CfnResource
from schema import Optional
import codesmith.common.naming as naming
from codesmith.common.cfn import resource_properties
from codesmith.common.schema import encoded_bool, non_empty_string, tolerant_schema
from codesmith.common.ssm import put_string_parameter, silent_delete_parameter_from_event
helper = CfnResource()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
properties_schema = tolerant_schema({
'UserPoolId': non_empty_string,
'UserPoolClientId': non_empty_string,
Optional('All', default=False): encoded_bool,
Optional('Domains', default=[]): [str],
Optional('Emails', default=[]): [str]
})
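# --- Editor's example, not part of the original file: the shape of resource
# properties this schema describes (all values made up for illustration).
EXAMPLE_PROPERTIES = {
    'UserPoolId': 'eu-west-1_EXAMPLE',
    'UserPoolClientId': 'example-client-id',
    'All': 'false',
    'Domains': ['example.org'],
    'Emails': ['admin@example.org'],
}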
ssm = boto3.client('ssm')
def handler(event, context):
logger.info('event: %s', event)
helper(event, context)
| 29.54717
| 99
| 0.751596
|
f528d3c7d1c051d306cd7f8c1738faafc34bc81c
| 125
|
py
|
Python
|
Mundo 1/Exercicios/Desafio005.py
|
yWolfBR/Python-CursoEmVideo
|
17bab8ad3c4293daf8377c5d49242942845b3577
|
[
"MIT"
] | null | null | null |
Mundo 1/Exercicios/Desafio005.py
|
yWolfBR/Python-CursoEmVideo
|
17bab8ad3c4293daf8377c5d49242942845b3577
|
[
"MIT"
] | null | null | null |
Mundo 1/Exercicios/Desafio005.py
|
yWolfBR/Python-CursoEmVideo
|
17bab8ad3c4293daf8377c5d49242942845b3577
|
[
"MIT"
] | null | null | null |
n = int(input('Digite um nmero: '))
print('Seu nmero {}. O antecessor {} e seu sucessor {}'.format(n, n - 1, n + 1))
| 41.666667
| 87
| 0.592
|
f52b501702b28918819f4339d418e24bd36e3fba
| 1,506
|
py
|
Python
|
geeksforgeeks/Data Structures/check_if_subtree.py
|
codervikash/online-courses
|
a60efad23af65080a98e7dd038fb2c750237b781
|
[
"MIT"
] | null | null | null |
geeksforgeeks/Data Structures/check_if_subtree.py
|
codervikash/online-courses
|
a60efad23af65080a98e7dd038fb2c750237b781
|
[
"MIT"
] | null | null | null |
geeksforgeeks/Data Structures/check_if_subtree.py
|
codervikash/online-courses
|
a60efad23af65080a98e7dd038fb2c750237b781
|
[
"MIT"
] | null | null | null |
# Given two binary trees, check if the first tree is subtree of the second one.
# A subtree of a tree T is a tree S consisting of a node in T and all of its descendants in T.
# The subtree corresponding to the root node is the entire tree; the subtree corresponding to any other node is called a proper subtree.
class Node:
    # A binary tree node (restored; the class is missing from this dump).
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def are_identical(a, b):
    # Two trees are identical when structure and data match everywhere.
    if a is None and b is None:
        return True
    if a is None or b is None:
        return False
    return (a.data == b.data and
            are_identical(a.left, b.left) and
            are_identical(a.right, b.right))

def is_subtree(t, s):
    # S is a subtree of T if S is identical to the tree rooted at some node of T.
    if s is None:
        return True
    if t is None:
        return False
    if are_identical(t, s):
        return True
    return is_subtree(t.left, s) or is_subtree(t.right, s)

# Driver program to test above function
""" TREE 1
Construct the following tree
26
/ \
10 3
/ \ \
4 6 3
\
30
"""
T = Node(26)
T.right = Node(3)
T.right.right = Node(3)
T.left = Node(10)
T.left.left = Node(4)
T.left.left.right = Node(30)
T.left.right = Node(6)
""" TREE 2
Construct the following tree
10
/ \
4 6
\
30
"""
S = Node(10)
S.right = Node(6)
S.left = Node(4)
S.left.right = Node(30)
if is_subtree(T, S):
    print("Tree 2 is subtree of Tree 1")
else:
    print("Tree 2 is not a subtree of Tree 1")
| 20.351351
| 136
| 0.632802
|
f52c3d4d080221bb0849b2d7854dea28cf442e0d
| 619
|
py
|
Python
|
papi_sdk/models/search/hotelpage/affiliate.py
|
stanislav-losev/papi-sdk-python
|
4a296745d626ef13c6d1170e9d3569cb1c37eb3c
|
[
"MIT"
] | 1
|
2022-02-01T08:53:24.000Z
|
2022-02-01T08:53:24.000Z
|
papi_sdk/models/search/hotelpage/affiliate.py
|
stanislav-losev/papi-sdk-python
|
4a296745d626ef13c6d1170e9d3569cb1c37eb3c
|
[
"MIT"
] | 2
|
2021-01-18T07:57:29.000Z
|
2021-06-23T11:04:14.000Z
|
papi_sdk/models/search/hotelpage/affiliate.py
|
stanislav-losev/papi-sdk-python
|
4a296745d626ef13c6d1170e9d3569cb1c37eb3c
|
[
"MIT"
] | 3
|
2020-12-30T13:09:45.000Z
|
2020-12-30T13:42:33.000Z
|
from typing import List, Optional
from papi_sdk.models.search.base_affiliate_response import (
BaseAffiliateSearchData,
BaseAffiliateSearchResponse,
BaseHotel,
BaseRate,
)
from papi_sdk.models.search.base_request import BaseAffiliateRequest
| 20.633333
| 68
| 0.799677
|
f52d16005d54fc06009e6a33b0d9fa26ef35fd47
| 2,093
|
py
|
Python
|
dl_tutorials/torch_neural_networks.py
|
learnerzhang/AnalyticsVidhya
|
697689a24a9d73785164512cab8ac4ee5494afe8
|
[
"Apache-2.0"
] | 1
|
2018-07-04T09:14:26.000Z
|
2018-07-04T09:14:26.000Z
|
dl_tutorials/torch_neural_networks.py
|
learnerzhang/AnalyticsVidhya
|
697689a24a9d73785164512cab8ac4ee5494afe8
|
[
"Apache-2.0"
] | null | null | null |
dl_tutorials/torch_neural_networks.py
|
learnerzhang/AnalyticsVidhya
|
697689a24a9d73785164512cab8ac4ee5494afe8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-01-02 16:44
# @Author : zhangzhen
# @Site :
# @File : torch_neural_networks.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    # The LeNet-style network from the PyTorch "Neural Networks" tutorial that
    # this script follows; the class definition is missing from this dump, so
    # it is restored here.
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)    # 1 input channel, 6 filters, 5x5 kernel
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
if __name__ == '__main__':
net = Net()
criterion = nn.MSELoss()
print(net)
params = list(net.parameters())
print(":", len(params))
for param in params:
print(param.size())
input = torch.randn(1, 1, 32, 32)
    target = torch.randn(10).view(1, -1)  # reshape to match the (1, 10) network output
out = net(input)
loss = criterion(out, target)
print(100 * "=")
print(out, target)
print("Loss:", loss)
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
net.zero_grad()
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
| 26.493671
| 77
| 0.592929
|
f52eac71ac3094d6c2f5d753f7dc5413e91d3ecd
| 422
|
py
|
Python
|
webhook.py
|
cadamswaite/RPI-Jekyll-Compiler
|
acf905b11f41c3bda286d4907a038b7888b1c8fa
|
[
"Unlicense"
] | null | null | null |
webhook.py
|
cadamswaite/RPI-Jekyll-Compiler
|
acf905b11f41c3bda286d4907a038b7888b1c8fa
|
[
"Unlicense"
] | null | null | null |
webhook.py
|
cadamswaite/RPI-Jekyll-Compiler
|
acf905b11f41c3bda286d4907a038b7888b1c8fa
|
[
"Unlicense"
] | null | null | null |
from bottle import route, run, template
gitdict = {'po2go':{'https://github.com/cadamswaite/po2go.git:master':'https://github.com/cadamswaite/po2go.git:gh-pages'}}
# Handle http requests to the root address
# Editor's stub, not from the original repo: the real handler was elided from
# this dump; this placeholder only lists the configured repositories.
@route('/')
def index():
    return template('Watching: {{repos}}', repos=', '.join(gitdict.keys()))


run(host='0.0.0.0', port=80)
| 23.444444
| 123
| 0.684834
|
f52ec88be52c378180af93ce81749dca618e2061
| 2,577
|
py
|
Python
|
shldn/leonard.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
shldn/leonard.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
shldn/leonard.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
"""
Leonard always DRIVES Sheldon (this module is the __main__ driver for Sheldon)
"""
import argparse
import sys
import os
try:
from cooper import Sheldon
except ImportError:
from .cooper import Sheldon
# Extensions for python source files
EXTENSIONS = [".py", ".mpy"]
if __name__ == "__main__":
main()
| 28.633333
| 89
| 0.592549
|
f52eee40e1e0d598ea7f901674518fc574586952
| 705
|
py
|
Python
|
tests/002_finder/003_changeextension.py
|
Sam-prog-sudo/boussole
|
5d6ec94356f9a91ff4d6d23c1700d3512b67006a
|
[
"MIT"
] | 13
|
2016-05-19T15:18:41.000Z
|
2022-03-22T15:37:32.000Z
|
tests/002_finder/003_changeextension.py
|
Sam-prog-sudo/boussole
|
5d6ec94356f9a91ff4d6d23c1700d3512b67006a
|
[
"MIT"
] | 38
|
2016-04-07T00:30:58.000Z
|
2022-02-28T13:29:33.000Z
|
tests/002_finder/003_changeextension.py
|
Sam-prog-sudo/boussole
|
5d6ec94356f9a91ff4d6d23c1700d3512b67006a
|
[
"MIT"
] | 3
|
2016-05-20T09:21:57.000Z
|
2020-10-12T10:56:49.000Z
|
# -*- coding: utf-8 -*-
| 26.111111
| 72
| 0.670922
|
f52efbe88e2653ae5d1fd37a74f972d83828b114
| 40,749
|
py
|
Python
|
Lib/site-packages/cherrypy/test/test_core.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/cherrypy/test/test_core.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/cherrypy/test/test_core.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
"""Basic tests for the CherryPy core: request handling."""
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
from cherrypy import _cptools, tools
from cherrypy.lib import http, static
import types
import os
localDir = os.path.dirname(__file__)
log_file = os.path.join(localDir, "test.log")
log_access_file = os.path.join(localDir, "access.log")
favicon_path = os.path.join(os.getcwd(), localDir, "../favicon.ico")
defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "CONNECT", "PROPFIND")
tools.login_redir = _cptools.Tool('before_handler', login_redir)
root.divorce = Divorce()
cherrypy.config.update({
'log.error_file': log_file,
'environment': 'test_suite',
'server.max_request_body_size': 200,
'server.max_request_header_size': 500,
})
appconf = {
'/': {'log.access_file': log_access_file},
'/method': {'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")},
}
cherrypy.tree.mount(root, config=appconf)
# Client-side code #
from cherrypy.test import helper
if __name__ == '__main__':
setup_server()
helper.testmain()
| 38.370056
| 104
| 0.545289
|
f52efca4ad0dbdcec53aee2fa61bc784274e7d40
| 1,036
|
py
|
Python
|
day4/solution1.py
|
zirne/aoc19
|
98feea895f0113ef60738723ca976dcbef0629b9
|
[
"MIT"
] | null | null | null |
day4/solution1.py
|
zirne/aoc19
|
98feea895f0113ef60738723ca976dcbef0629b9
|
[
"MIT"
] | null | null | null |
day4/solution1.py
|
zirne/aoc19
|
98feea895f0113ef60738723ca976dcbef0629b9
|
[
"MIT"
] | null | null | null |
# Solution 1
# (helper functions restored; they are missing from this dump)
def readInputFile(filename):
    with open(filename) as f:
        return f.read()

def checkNeverDecreaseRule(number):
    # digits must never decrease from left to right
    digits = str(number)
    return all(a <= b for a, b in zip(digits, digits[1:]))

def checkHasAdjacentSame(number):
    # at least two adjacent digits must be the same
    digits = str(number)
    return any(a == b for a, b in zip(digits, digits[1:]))
input = readInputFile("input.txt").strip()
print(input)
lowest = input.split("-")[0]
highest = input.split("-")[1]
current = int(input.split("-")[0])
print(lowest)
print(highest)
resultArr = []
while current <= int(highest):
if checkNeverDecreaseRule(current) and checkHasAdjacentSame(current):
resultArr.append(current)
#print(checkNeverDecreaseRule(lowest))
#print(checkHasAdjacentSame(lowest))
current += 1
print(len(resultArr))
| 18.836364
| 70
| 0.621622
|
f52fa19632597f93eba421103fbc7100653b7f9d
| 763
|
py
|
Python
|
e2e/Tests/Transactions/Verify.py
|
rikublock/Meros
|
7a3ae9c78af388eb523bc8a2c840018fc058ef44
|
[
"CC0-1.0"
] | null | null | null |
e2e/Tests/Transactions/Verify.py
|
rikublock/Meros
|
7a3ae9c78af388eb523bc8a2c840018fc058ef44
|
[
"CC0-1.0"
] | null | null | null |
e2e/Tests/Transactions/Verify.py
|
rikublock/Meros
|
7a3ae9c78af388eb523bc8a2c840018fc058ef44
|
[
"CC0-1.0"
] | 1
|
2021-02-08T23:46:35.000Z
|
2021-02-08T23:46:35.000Z
|
#Transactions classes.
from e2e.Classes.Transactions.Transaction import Transaction
from e2e.Classes.Transactions.Transactions import Transactions
#TestError Exception.
from e2e.Tests.Errors import TestError
#RPC class.
from e2e.Meros.RPC import RPC
#Sleep standard function.
from time import sleep
#Verify a Transaction.
#Verify the Transactions.
| 23.84375
| 80
| 0.756225
|
f52fef7331b7922effcc0ce7dc2004ff0e5e1b57
| 149
|
py
|
Python
|
src/pygame_utils/sprite/gamecomponent.py
|
MarronEyes/pygame_utils
|
27a1f1328533d04c20ccb95208d44fda3be81a09
|
[
"MIT"
] | null | null | null |
src/pygame_utils/sprite/gamecomponent.py
|
MarronEyes/pygame_utils
|
27a1f1328533d04c20ccb95208d44fda3be81a09
|
[
"MIT"
] | null | null | null |
src/pygame_utils/sprite/gamecomponent.py
|
MarronEyes/pygame_utils
|
27a1f1328533d04c20ccb95208d44fda3be81a09
|
[
"MIT"
] | null | null | null |
import pygame
from graphics.component import Component
| 16.555556
| 40
| 0.718121
|
f5301087690900f18790595cf080153f91b40dd0
| 954
|
py
|
Python
|
motivation_quote/app.py
|
lukas-weiss/motivation-quote
|
90c73342a71f6a8f8b5339b5d080d19ac67083b7
|
[
"MIT"
] | null | null | null |
motivation_quote/app.py
|
lukas-weiss/motivation-quote
|
90c73342a71f6a8f8b5339b5d080d19ac67083b7
|
[
"MIT"
] | null | null | null |
motivation_quote/app.py
|
lukas-weiss/motivation-quote
|
90c73342a71f6a8f8b5339b5d080d19ac67083b7
|
[
"MIT"
] | null | null | null |
import json
import os.path
import logging
import csv
from random import randint
logger = logging.getLogger()
logger.setLevel(logging.INFO)
| 25.105263
| 61
| 0.603774
|
f531e1bea64fba94ad609a7c42aeb9cf4d1498ca
| 3,142
|
py
|
Python
|
tools/extract_textline.py
|
bitcoder-17/scale-digits-recognition
|
b75c658ffdc830784ae4be9c007909e4c8f1d695
|
[
"MIT"
] | null | null | null |
tools/extract_textline.py
|
bitcoder-17/scale-digits-recognition
|
b75c658ffdc830784ae4be9c007909e4c8f1d695
|
[
"MIT"
] | null | null | null |
tools/extract_textline.py
|
bitcoder-17/scale-digits-recognition
|
b75c658ffdc830784ae4be9c007909e4c8f1d695
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import cv2
import json
import math
import numpy as np
from argparse import ArgumentParser


def distance(p, q):
    # Euclidean distance between two 2D points.
    return math.hypot(p[0] - q[0], p[1] - q[1])


def order_points(points):
    # Order four corner points as top-left, top-right, bottom-right,
    # bottom-left: the coordinate sums pick tl/br, the (y - x) differences
    # pick tr/bl. (Restored here; the helpers are missing from this dump.)
    pts = np.array(points, dtype=np.float32)
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1).ravel()
    return [pts[np.argmin(s)].tolist(), pts[np.argmin(d)].tolist(),
            pts[np.argmax(s)].tolist(), pts[np.argmax(d)].tolist()]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('input_dir', type=str,
help='Directory where the frame image and the json label be')
parser.add_argument('output_dir', type=str,
help='Directory where the textline would be extracted to')
parser.add_argument('--ext', type=str, default='png')
args = parser.parse_args()
input_dir = Path(args.input_dir)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
jsons = list(input_dir.glob('*.json'))
json_path: Path
for json_path in jsons:
label_dict = json.load(open(json_path, 'rt'))
if len(label_dict['shapes']) == 0:
continue
frame = cv2.imread(str(json_path.with_suffix(f'.{args.ext}')))
for i, shape in enumerate(label_dict['shapes']):
points = order_points(shape['points'])
tl, tr, br, bl = points
width = int(np.round(max([distance(tl, tr), distance(bl, br)])))
height = int(np.round(max([distance(tl, bl), distance(tr, br)])))
dst = np.array([[0, 0],
[width - 1, 0],
[width - 1, height - 1],
[0, height - 1]], dtype=np.float32)
M = cv2.getPerspectiveTransform(np.array(points, dtype=np.float32), dst)
warp = cv2.warpPerspective(frame, M, (width, height))
output_path = output_dir.joinpath(json_path.stem + f'.{args.ext}')
cv2.imwrite(str(output_path), warp)
| 33.073684
| 85
| 0.54965
|
f532177c4078c1e01572de399b2bc77a18421da8
| 14,159
|
py
|
Python
|
blender/2.79/scripts/addons/io_coat3D/tex.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
io_coat3D/tex.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | null | null | null |
io_coat3D/tex.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
import bpy
import os
| 42.139881
| 127
| 0.634296
|
f5327046ce5fcde6d3a6fd1f71b52eace22cd4ab
| 3,613
|
py
|
Python
|
proxy_prometheus_alerts.py
|
mhagander/promnagios
|
12329b7abbbb76746784c4b706f4784c63bae194
|
[
"PostgreSQL"
] | 1
|
2019-06-07T14:10:14.000Z
|
2019-06-07T14:10:14.000Z
|
proxy_prometheus_alerts.py
|
mhagander/promnagios
|
12329b7abbbb76746784c4b706f4784c63bae194
|
[
"PostgreSQL"
] | null | null | null |
proxy_prometheus_alerts.py
|
mhagander/promnagios
|
12329b7abbbb76746784c4b706f4784c63bae194
|
[
"PostgreSQL"
] | null | null | null |
#!/usr/bin/env python3
#
# Proxy alerts generated by Prometheus Alertmanager, turning them into
# nagios passive alert information.
#
# Copyright 2019-2020, PostgreSQL Infrastructure Team
# Author: Magnus Hagander
#
import argparse
import http.server
import json
import time
import sys
missed_alerts = 0


class NotificationHandler(http.server.BaseHTTPRequestHandler):
    # Editor's sketch, not from the original repo: the handler class was elided
    # from this dump. This minimal version accepts an Alertmanager webhook POST
    # and writes Nagios passive service check results; the payload field names
    # follow the standard Alertmanager webhook format.
    def do_POST(self):
        global missed_alerts
        length = int(self.headers.get('content-length', 0))
        payload = json.loads(self.rfile.read(length).decode('utf-8'))
        for alert in payload.get('alerts', []):
            host = alert.get('labels', {}).get('instance', 'unknown')
            if args.hostsuffix:
                host = host + args.hostsuffix
            state = 2 if alert.get('status') == 'firing' else 0
            service = alert.get('labels', {}).get('alertname', 'alert')
            output = alert.get('annotations', {}).get('summary', 'no summary')
            line = '[{}] PROCESS_SERVICE_CHECK_RESULT;{};{};{};{}\n'.format(
                int(time.time()), host, service, state, output)
            try:
                with open(args.nagioscmd, 'w') as cmdfile:
                    cmdfile.write(line)
            except OSError:
                missed_alerts += 1
        self.send_response(200)
        self.end_headers()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create nagios alerts from prometheus monitors"
)
parser.add_argument('--hostsuffix', help='Suffix to add to hostnamees')
parser.add_argument('--port', help='TCP port to bind to')
parser.add_argument('--nagioscmd', help='Path to nagios command file')
args = parser.parse_args()
if not args.port:
print("Port must be specified")
sys.exit(1)
if not args.nagioscmd:
print("Nagios command path must be specified")
sys.exit(1)
server_address = ('localhost', int(args.port))
httpd = http.server.HTTPServer(server_address, NotificationHandler)
httpd.serve_forever()
| 30.361345
| 82
| 0.568779
|
f53445eedfca0ebec216d205d17da7023b06710f
| 172
|
py
|
Python
|
QuantLib/tsa/smooth/__init__.py
|
wanhanwan/Packages
|
14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3
|
[
"MIT"
] | 5
|
2018-06-29T16:56:10.000Z
|
2019-06-20T03:31:44.000Z
|
QuantLib/tsa/smooth/__init__.py
|
wanhanwan/Packages
|
14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3
|
[
"MIT"
] | null | null | null |
QuantLib/tsa/smooth/__init__.py
|
wanhanwan/Packages
|
14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3
|
[
"MIT"
] | 3
|
2018-06-25T06:37:17.000Z
|
2018-11-22T08:12:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# @Author : wanhanwan (wanshuai_shufe@163.com)
# @Date : 2019/11/25 1:20:12
from .filter import llt_filter
| 24.571429
| 46
| 0.668605
|