hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9fedd39a8f50939e9c4f94bb47bef7f75ae894a9 | 8,113 | py | Python | mozi/layers/recurrent.py | hycis/Mozi | 7f2eccbe3169c10d231e07edf8bc650039fa4eb2 | [
"MIT"
] | 122 | 2015-07-24T09:29:06.000Z | 2022-02-22T02:51:00.000Z | mozi/layers/recurrent.py | hycis/Mozi | 7f2eccbe3169c10d231e07edf8bc650039fa4eb2 | [
"MIT"
] | 4 | 2015-07-27T04:37:11.000Z | 2020-04-04T08:05:00.000Z | mozi/layers/recurrent.py | hycis/Mozi | 7f2eccbe3169c10d231e07edf8bc650039fa4eb2 | [
"MIT"
] | 27 | 2015-07-24T12:59:35.000Z | 2020-04-14T00:21:43.000Z |
from mozi.utils.theano_utils import shared_zeros, alloc_zeros_matrix, shared_ones
from mozi.layers.template import Template
from mozi.weight_init import OrthogonalWeight, GaussianWeight, Identity
import theano.tensor as T
import theano
| 38.450237 | 92 | 0.609762 |
9ff112f147fc3eea03cddc2ce893a7da503429c2 | 1,045 | py | Python | emilia/modules/sql/admin_sql.py | masterisira/ELIZA_OF-master | 02a7dbf48e4a3d4ee0981e6a074529ab1497aafe | [
"Unlicense"
] | null | null | null | emilia/modules/sql/admin_sql.py | masterisira/ELIZA_OF-master | 02a7dbf48e4a3d4ee0981e6a074529ab1497aafe | [
"Unlicense"
] | null | null | null | emilia/modules/sql/admin_sql.py | masterisira/ELIZA_OF-master | 02a7dbf48e4a3d4ee0981e6a074529ab1497aafe | [
"Unlicense"
] | null | null | null | import threading
from typing import Union
from sqlalchemy import Column, Integer, String, Boolean
from emilia.modules.sql import SESSION, BASE
PermanentPin.__table__.create(checkfirst=True)
PERMPIN_LOCK = threading.RLock()
| 24.302326 | 64 | 0.677512 |
9ff556e97733100f33310335bf44e3b09364ba15 | 3,985 | py | Python | demo.py | danuker/piggies | 215495689122fc14f9deb40587aaf2f34f526120 | [
"MIT"
] | 5 | 2018-06-05T14:28:32.000Z | 2020-10-28T14:30:03.000Z | demo.py | danuker/piggies | 215495689122fc14f9deb40587aaf2f34f526120 | [
"MIT"
] | 5 | 2018-06-04T09:08:48.000Z | 2018-06-29T17:46:58.000Z | demo.py | danuker/piggies | 215495689122fc14f9deb40587aaf2f34f526120 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Before you can use Piggies, you need actual wallets.
# To fetch and extract the wallet clients, and create wallet files:
# mkdir wallets && cd wallets
#
# wget https://download.electrum.org/3.1.3/Electrum-3.1.3.tar.gz
# tar xvzf Electrum-3.1.3.tar.gz
# cd Electrum-3.1.3/
# mkdir -p ../../datastores/BTC/wallets/
# ./electrum create -w ../../datastores/BTC/wallets/your_BTC_wallet_name_here.dat
# cd ..
#
# wget https://dlsrc.getmonero.org/cli/monero-linux-x64-v0.12.2.0.tar.bz2
# tar xvjf monero-linux-x64-v0.12.2.0.tar.bz2
# cd monero-v0.12.2.0/
# mkdir -p ../../datastores/XMR/wallets/
# ./monero-wallet-cli --generate-new-wallet=../../datastores/XMR/wallets/your_XMR_wallet_name_here.dat
# cd ../..
#
# # The next command will sync the Monero blockchain.
# # It took about 48h (+/- 24h) on an SSD, on 2018-06-06.
# # An HDD (not SSD) would take about 4.7 times longer!!!
# # Also, make sure you are using a wired network connection, not Wi-Fi (which is slower)!
#
# # Required disk space: Multiply the last reported size here by 1.3:
# # https://moneroblocks.info/stats/blockchain-growth
# # Right now, that results in 52932.49 MB (51.69 GB)
# wallets/monero-v0.12.2.0/monerod --data-dir datastores/XMR --rpc-bind-port=37779
# cd ..
# wget https://releases.parity.io/v1.11.4/x86_64-unknown-debian-gnu/parity_1.11.4_debian_amd64.deb
# sudo dpkg -i parity_1.11.4_debian_amd64.deb
# parity account new -d datastores/ETH/
#
# # The Parity wallet also takes a while to sync (around 12h or so, as of 2018-06-28).
# # Using the CLI options in PiggyETH, the blockchain without ancient blocks only takes up ~24GB.
# # Check
# ./demo
import logging
from decimal import Decimal
from piggies import MasterPiggy
logger = logging.getLogger('piggy_logs')
# Requested piggy settings
piggy_settings = {
'BTC': {
'wallet_bin_path': 'wallets/Electrum-3.1.3/electrum',
'datastore_path': 'datastores/BTC',
'wallet_filename': 'your_BTC_wallet_name_here.dat',
'wallet_password': 'your_BTC_password_here',
'rpcuser':'your_BTC_RPC_username',
'rpcpassword': 'your_BTC_RPC_password',
'rpcport': 37778
},
'XMR': {
'daemon_bin_path': 'wallets/monero-v0.12.2.0/monerod',
'wallet_bin_path': 'wallets/monero-v0.12.2.0/monero-wallet-rpc',
'datastore_path': 'datastores/XMR',
'wallet_filename': 'your_XMR_wallet_name_here.dat',
'wallet_password': 'your_XMR_password_here',
'daemon_port': 37779, # For the default Monero client, the wallet has a separate server daemon
'rpcport': 37780
},
'ETH': {
'wallet_bin_path': '/usr/bin/parity',
'datastore_path': 'datastores/ETH',
'wallet_password': 'your_ETH_wallet_password_here'
}
}
if __name__ == '__main__':
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
main()
| 37.242991 | 103 | 0.684065 |
9ff624252765d2c5657956ad0fdec3d525d53544 | 22,024 | py | Python | lcfit_utils.py | idekany/lcfit | 4a0080fca981afe2b8974db8f5d3484c663b6c13 | [
"MIT"
] | null | null | null | lcfit_utils.py | idekany/lcfit | 4a0080fca981afe2b8974db8f5d3484c663b6c13 | [
"MIT"
] | null | null | null | lcfit_utils.py | idekany/lcfit | 4a0080fca981afe2b8974db8f5d3484c663b6c13 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import fourier as ff
import matplotlib
import warnings
from matplotlib import pyplot as plt
from os.path import isfile
matplotlib.use('Agg')
def get_stratification_labels(data, n_folds):
"""
Create an array of stratification labels from an array of continuous values to be used in a stratified cross-
validation splitter.
:param data: list or numpy.ndarray
The input data array.
:param n_folds: int
The number of cross-validation folds to be used with the output labels.
:return: labels, numpy.ndarray
The array of integer stratification labels.
"""
assert isinstance(data, np.ndarray or list), "data must be of type list or numpy.ndarray"
if isinstance(data, list):
data = np.array(data)
ndata = len(data)
isort = np.argsort(data) # Indices of sorted phases
labels = np.empty(ndata)
labels[isort] = np.arange(ndata) # Compute phase order
labels = np.floor(labels / n_folds) # compute phase labels for StratifiedKFold
if np.min(np.bincount(labels.astype(int))) < n_folds: # If too few elements are with last label, ...
labels[labels == np.max(labels)] = np.max(
labels) - 1 # ... the then change that label to the one preceding it
return labels
def read_input(fname: str, do_gls=False, known_columns=False):
"""
Reads the input list file with columns: object ID, [period, [dataset]]
:param fname: string, the name of the input file
:param do_gls: boolean, whether to perform GLS on the input time series. If False, the second column of the input
file must contain the period.
:param known_columns: boolean; whether the dataset to be used is known. If True, the last column of the input
file must contain the number of the column.
:return: ndarray(s) or None(s); 1-d arrays with the obect IDs, periods, and datasets
"""
dtypes = ['|S25'] # dtype for first column: identifiers
if do_gls:
if known_columns:
usecols = (0, 1)
dtypes = dtypes + ['i']
else:
usecols = (0,)
else:
if known_columns:
usecols = (0, 1, 2)
dtypes = dtypes + ['f8'] + ['i']
else:
usecols = (0, 1)
dtypes = dtypes + ['f8']
arr = np.genfromtxt(fname, usecols=usecols,
dtype=dtypes, unpack=False, comments='#', filling_values=np.nan, names=True)
object_id = arr['id'].reshape(-1, ).astype(str)
if do_gls:
object_per = None
else:
object_per = arr['period'].reshape(-1, )
if known_columns:
object_ap = arr['ap'].reshape(-1, )
else:
object_ap = None
return object_id, object_per, object_ap
def extend_phases(p, y, phase_ext_neg=0.0, phase_ext_pos=0.0, sort=False):
"""
Extend a phase and a corresponding data vector in phase.
"""
# Extend data vectors in phase:
neg_ext_mask = (p - 1 > phase_ext_neg) # select phases in negative direction
pos_ext_mask = (p + 1 < phase_ext_pos) # select phases in positive direction
# Compose new data vectors according to extended phases:
p_ext = np.hstack((p[neg_ext_mask] - 1, p, p[pos_ext_mask] + 1))
y_ext = np.hstack((y[neg_ext_mask], y, y[pos_ext_mask]))
# magerr_ext=np.hstack((results['magerr_binned'][neg_ext_mask], results['magerr_binned'],
# results['magerr_binned'][pos_ext_mask]))
if sort:
# Sort data according to observed phases:
indx = np.argsort(p_ext) # indices of sorted ophase
p_ext_sorted = p_ext[indx]
y_ext_sorted = y_ext[indx]
return p_ext_sorted, y_ext_sorted
else:
return p_ext, y_ext
| 42.517375 | 133 | 0.582047 |
9ff65d9e76edd0a7d15ce5ca32d68a653fd8c1bc | 2,939 | py | Python | facetool/annotator.py | yliess86/FaceTool | f93c511e9868b4555225750efbac2228a00fea00 | [
"MIT"
] | 4 | 2020-05-03T01:29:23.000Z | 2020-07-15T08:13:05.000Z | facetool/annotator.py | yliess86/FaceTool | f93c511e9868b4555225750efbac2228a00fea00 | [
"MIT"
] | 3 | 2020-04-30T01:18:02.000Z | 2020-05-01T14:52:11.000Z | facetool/annotator.py | yliess86/FaceCrop | f93c511e9868b4555225750efbac2228a00fea00 | [
"MIT"
] | 1 | 2020-05-16T21:27:24.000Z | 2020-05-16T21:27:24.000Z | # -*- coding: utf-8 -*-
"""facetool.annotator
The files provides a Face Annotator in charge of combining the result of the
Face Detector and Face Landmark in a single pandas DataFrame. This Face
Annotator is the API built to be used by the end user.
"""
from facetool.detector import FaceDetector
from facetool.landmarker import FaceLandmarker
from tqdm import tqdm
from typing import Tuple
import numpy as np
import pandas as pd
| 36.7375 | 78 | 0.600204 |
9ff7ddf37d375ebc0e9b1af36cfd6f7f85ab8e18 | 1,338 | py | Python | pygrn/problems/air_quality.py | nico1as/pyGRN | 115d9d42dfbd374fc64393cabefb2a8e245aa6b7 | [
"Apache-2.0"
] | 7 | 2018-07-18T16:08:51.000Z | 2020-12-09T07:18:35.000Z | pygrn/problems/air_quality.py | nico1as/pyGRN | 115d9d42dfbd374fc64393cabefb2a8e245aa6b7 | [
"Apache-2.0"
] | 3 | 2018-04-13T11:44:59.000Z | 2018-04-19T13:58:06.000Z | pygrn/problems/air_quality.py | nico1as/pyGRN | 115d9d42dfbd374fc64393cabefb2a8e245aa6b7 | [
"Apache-2.0"
] | 6 | 2018-07-22T01:54:14.000Z | 2021-08-04T16:01:38.000Z | from __future__ import print_function
import numpy as np
import os
from datetime import datetime
from pygrn.problems import TimeRegression
| 31.857143 | 77 | 0.595665 |
9ff867269ebc563da12e37b56fdbdcb6807b0b80 | 3,572 | py | Python | vocabulary.py | retrieva/python_stm | 862e63e6f03b326cb036b1136dead280c42b9da8 | [
"MIT"
] | 11 | 2020-02-07T05:26:08.000Z | 2021-11-27T09:51:24.000Z | vocabulary.py | retrieva/python_stm | 862e63e6f03b326cb036b1136dead280c42b9da8 | [
"MIT"
] | null | null | null | vocabulary.py | retrieva/python_stm | 862e63e6f03b326cb036b1136dead280c42b9da8 | [
"MIT"
] | 1 | 2020-02-10T02:44:37.000Z | 2020-02-10T02:44:37.000Z | # This code is available under the MIT License.
# (c)2010-2011 Nakatani Shuyo / Cybozu Labs Inc.
# (c)2018-2019 Hiroki Iida / Retrieva Inc.
import nltk
import re
import MeCab
stopwords_list = nltk.corpus.stopwords.words('english')
recover_list = {"wa":"was", "ha":"has"}
wl = nltk.WordNetLemmatizer()
def load_corpus(ranges):
"""
load data from corpus
"""
tmp = re.match(r'(\d+):(\d+)$', ranges)
if tmp:
start = int(tmp.group(1))
end = int(tmp.group(2))
from nltk.corpus import brown as corpus
return [corpus.words(fileid) for fileid in corpus.fileids()[start:end]]
def load_file(filename):
"""
for one file
one line corresponds to one doc
"""
corpus = []
f = open(filename, 'r')
for line in f:
doc = re.findall(r'\w+(?:\'\w+)?', line)
if len(doc) > 0:
corpus.append(doc)
f.close()
return corpus
| 26.072993 | 79 | 0.56075 |
9ffac072e4010a04d6f1b435f72c2103f99a9533 | 7,664 | py | Python | kubb_match/views/rest.py | BartSaelen/kubb_match | 848663bb3db5da73b726a956aa887c3eec30db8b | [
"Apache-2.0"
] | 2 | 2015-05-03T13:42:27.000Z | 2015-08-07T07:42:29.000Z | kubb_match/views/rest.py | BartSaelen/kubb_match | 848663bb3db5da73b726a956aa887c3eec30db8b | [
"Apache-2.0"
] | 2 | 2016-09-15T12:38:22.000Z | 2016-09-15T12:41:18.000Z | kubb_match/views/rest.py | BartSaelen/kubb_match | 848663bb3db5da73b726a956aa887c3eec30db8b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from pyramid.view import view_defaults, view_config
from kubb_match.data.mappers import map_team, map_game
from kubb_match.data.models import Team
from kubb_match.service.tournament_service import TournamentService
| 34.678733 | 87 | 0.579201 |
9ffb3711d6a34d1adba73090bd3c202a99a4f456 | 2,651 | py | Python | CTCWordBeamSearch-master/tests/test_word_beam_search.py | brucegrapes/htr | 9f8f07173ccc740dd8a4dfc7e8038abe36664756 | [
"MIT"
] | 488 | 2018-03-01T11:18:26.000Z | 2022-03-10T09:29:32.000Z | CTCWordBeamSearch-master/tests/test_word_beam_search.py | brucegrapes/htr | 9f8f07173ccc740dd8a4dfc7e8038abe36664756 | [
"MIT"
] | 60 | 2018-03-10T18:37:51.000Z | 2022-03-30T19:37:18.000Z | CTCWordBeamSearch-master/tests/test_word_beam_search.py | brucegrapes/htr | 9f8f07173ccc740dd8a4dfc7e8038abe36664756 | [
"MIT"
] | 152 | 2018-03-01T11:18:25.000Z | 2022-03-08T23:37:46.000Z | import codecs
import numpy as np
from word_beam_search import WordBeamSearch
def apply_word_beam_search(mat, corpus, chars, word_chars):
"""Decode using word beam search. Result is tuple, first entry is label string, second entry is char string."""
T, B, C = mat.shape
# decode using the "Words" mode of word beam search with beam width set to 25 and add-k smoothing to 0.0
assert len(chars) + 1 == C
wbs = WordBeamSearch(25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), word_chars.encode('utf8'))
label_str = wbs.compute(mat)
# result is string of labels terminated by blank
char_str = []
for curr_label_str in label_str:
s = ''
for label in curr_label_str:
s += chars[label] # map label to char
char_str.append(s)
return label_str[0], char_str[0]
def load_mat(fn):
"""Load matrix from csv and apply softmax."""
mat = np.genfromtxt(fn, delimiter=';')[:, :-1] # load matrix from file
T = mat.shape[0] # dim0=t, dim1=c
# apply softmax
res = np.zeros(mat.shape)
for t in range(T):
y = mat[t, :]
e = np.exp(y)
s = np.sum(e)
res[t, :] = e / s
# expand to TxBxC
return np.expand_dims(res, 1)
def test_mini_example():
"""Mini example, just to check that everything is working."""
corpus = 'a ba' # two words "a" and "ba", separated by whitespace
chars = 'ab ' # the first three characters which occur in the matrix (in this ordering)
word_chars = 'ab' # whitespace not included which serves as word-separating character
mat = np.array([[[0.9, 0.1, 0.0, 0.0]], [[0.0, 0.0, 0.0, 1.0]],
[[0.6, 0.4, 0.0, 0.0]]]) # 3 time-steps and 4 characters per time time ("a", "b", " ", blank)
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Mini example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'ba'
def test_real_example():
"""Real example using a sample from a HTR dataset."""
data_path = '../data/bentham/'
corpus = codecs.open(data_path + 'corpus.txt', 'r', 'utf8').read()
chars = codecs.open(data_path + 'chars.txt', 'r', 'utf8').read()
word_chars = codecs.open(data_path + 'wordChars.txt', 'r', 'utf8').read()
mat = load_mat(data_path + 'mat_2.csv')
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Real example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'submitt both mental and corporeal, is far beyond any idea'
| 35.346667 | 115 | 0.614485 |
9ffdc1e59bb26b37e4cdbdb001abd755fccd616d | 859 | py | Python | src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py | YACS-RCOS/yacs.n | a04f8e79279826914b942e3a8c709c50f08ff149 | [
"MIT"
] | 20 | 2020-02-29T19:03:31.000Z | 2022-02-18T21:13:12.000Z | src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py | YACS-RCOS/yacs.n | a04f8e79279826914b942e3a8c709c50f08ff149 | [
"MIT"
] | 465 | 2020-02-29T19:08:18.000Z | 2022-03-18T22:21:49.000Z | src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py | YACS-RCOS/yacs.n | a04f8e79279826914b942e3a8c709c50f08ff149 | [
"MIT"
] | 19 | 2020-02-29T01:22:23.000Z | 2022-02-14T01:47:09.000Z | """add session type and instructor
Revision ID: 54df4fb8dfe9
Revises: a3be4710680d
Create Date: 2021-09-25 03:08:18.501929
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '54df4fb8dfe9'
down_revision = 'a3be4710680d'
branch_labels = None
depends_on = None
| 27.709677 | 101 | 0.71362 |
9ffddf9f2ec970e9ca9b3a8192c022d87d76144d | 1,656 | py | Python | plot_data.py | qzane/kmeans-cuda | f2a0e8dd6859cf735c95e1365342f4623f0a71ff | [
"MIT"
] | null | null | null | plot_data.py | qzane/kmeans-cuda | f2a0e8dd6859cf735c95e1365342f4623f0a71ff | [
"MIT"
] | null | null | null | plot_data.py | qzane/kmeans-cuda | f2a0e8dd6859cf735c95e1365342f4623f0a71ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 22:31:17 2018
@author: qzane
"""
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-p', '--points', action='store', type=str, required=True,
help='points.txt')
parser.add_argument('-c', '--classes', action='store', type=str, required=True,
help='classes.txt')
args = parser.parse_args()
points = read_points(args.points)
classes = read_classes(args.classes)
plot(points, classes) | 25.090909 | 83 | 0.532609 |
9ffe17de7805da9bfb7ad7d54bb9a08115c66b6e | 149 | py | Python | commonutils/__init__.py | lrbsunday/commonutils | 6a4f2106e877417eebc8b8c6a9c1610505bd21e3 | [
"BSD-3-Clause"
] | 1 | 2017-09-10T13:13:04.000Z | 2017-09-10T13:13:04.000Z | commonutils/__init__.py | lrbsunday/commonutils | 6a4f2106e877417eebc8b8c6a9c1610505bd21e3 | [
"BSD-3-Clause"
] | 2 | 2021-03-25T21:45:54.000Z | 2021-11-15T17:47:06.000Z | commonutils/__init__.py | lrbsunday/commonutils | 6a4f2106e877417eebc8b8c6a9c1610505bd21e3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for commonutils."""
__author__ = """lrbsunday"""
__email__ = '272316131@qq.com'
__version__ = '0.1.0'
| 18.625 | 40 | 0.637584 |
9ffe6ea421da07a4d91197e1ea46c83dd156f66f | 826 | py | Python | app/components/admin.py | Uniquode/uniquode2 | 385f3e0b26383c042d8da64b52350e82414580ea | [
"MIT"
] | null | null | null | app/components/admin.py | Uniquode/uniquode2 | 385f3e0b26383c042d8da64b52350e82414580ea | [
"MIT"
] | null | null | null | app/components/admin.py | Uniquode/uniquode2 | 385f3e0b26383c042d8da64b52350e82414580ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.utils.timesince import timesince
# noinspection PyMethodMayBeStatic
| 25.8125 | 69 | 0.690073 |
9fff12642cb00ff3e2ce7ae890c3d2b10cbbe1d1 | 8,936 | py | Python | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | [
"MIT"
] | null | null | null | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | [
"MIT"
] | null | null | null | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | [
"MIT"
] | null | null | null | import os
from os.path import join, isfile
from shutil import Error
from sys import exec_prefix
import numpy as np
import fit
import simple_read_data
from tabulate import tabulate
import logging
np.seterr(all='raise')
def check_data_format(data):
'''
check if the input data satisfies the following requiresments:
1. it is a dictionary {x: [], y: [], yerr: []}.
2. The array must have same size
if the data format is wrong, raise a Type Error
'''
conf = {'x': [], 'y' : [], 'yerr' : [] }
if (check_structure(data,conf)==False):
raise DataFormatError("Wrong format for the input data")
else:
if (np.min(data['y']) < 0 or np.max(data['y'])>1.0):
raise DataFormatError("y is out of range (0,1)")
def print_debug():
debug_msg = 'debug'
return debug_msg
def check_structure(struct, conf):
if isinstance(struct, dict) and isinstance(conf, dict):
# struct is a dict of types or other dicts
return all(k in conf and check_structure(struct[k], conf[k]) for k in struct)
if isinstance(struct, list) and isinstance(conf, list):
# struct is list in the form [type or dict]
return all(check_structure(struct[0], c) for c in conf)
elif isinstance(struct, type):
# struct is the type of conf
return isinstance(conf, struct)
else:
# struct is neither a dict, nor list, not type
return False
if __name__ == '__main__':
fpath ='../tests/test_data'
wfm1 = WignerFunc_Measurement(fpath)
wfm1.setup_sbs()
wfm1.report() | 31.575972 | 173 | 0.57218 |
b000e8e09627008c8e1b4d9bdfd0f7e449d23a7e | 1,729 | py | Python | falmer/content/models/scheme.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | [
"MIT"
] | 2 | 2017-04-27T19:35:59.000Z | 2017-06-13T16:19:33.000Z | falmer/content/models/scheme.py | sussexstudent/falmer | ae735bd9d6177002c3d986e5c19a78102233308f | [
"MIT"
] | 975 | 2017-04-13T11:31:07.000Z | 2022-02-10T07:46:18.000Z | falmer/content/models/scheme.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | [
"MIT"
] | 3 | 2018-05-09T06:42:25.000Z | 2020-12-10T18:29:30.000Z | from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, MultiFieldPanel
from wagtail.core.blocks import StreamBlock
from wagtail.core.fields import StreamField
from wagtail.images.edit_handlers import ImageChooserPanel
from falmer.content import components
from falmer.content.components.structures import sidebar_card
from falmer.content.models.mixins import SocialMediaMixin
from falmer.matte.models import MatteImage
from .core import Page
| 27.887097 | 97 | 0.685367 |
b0017ce65ff4bed42aaeae9f18c1a86d9bbd1f1d | 1,089 | py | Python | scripts/main_validation.py | platycristate/ptah | 15369382fc48860cc5bcd6a201a8b250ae8cb516 | [
"MIT"
] | null | null | null | scripts/main_validation.py | platycristate/ptah | 15369382fc48860cc5bcd6a201a8b250ae8cb516 | [
"MIT"
] | 1 | 2021-06-11T12:01:33.000Z | 2021-06-11T12:01:33.000Z | scripts/main_validation.py | platycristate/ptah | 15369382fc48860cc5bcd6a201a8b250ae8cb516 | [
"MIT"
] | 1 | 2021-06-11T11:57:06.000Z | 2021-06-11T11:57:06.000Z | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import spacy
from time import time
import pickle
from collections import defaultdict
import pmi_tfidf_classifier as ptic
path = "../data/"
pd.set_option("display.max_rows", None, "display.max_columns", None)
np.random.seed(250)
spacy.prefer_gpu()
nlp = spacy.load("en_core_sci_sm", disable=['ner', 'parser'])
train_data = pd.read_csv(path + 'DILI_data_mixed.csv')
test_data = pd.read_csv(path + "Validation.tsv", sep="\t")
targets_train = train_data['Label'].values
tokenized_texts = ptic.tokenization(train_data)
tokenized_test_texts = ptic.tokenization(test_data)
N = len(tokenized_texts)
word2text_count = ptic.get_word_stat( tokenized_texts )
words_pmis = ptic.create_pmi_dict(tokenized_texts, targets_train, min_count=1)
t1 = time()
results = ptic.classify_pmi_based(words_pmis, word2text_count, tokenized_test_texts, N)
t2 = time()
test_data["Label"] = results
print("Classfication time: %s min" % (round(t2 - t1, 3)/60))
test_data.to_csv(path + "arsentii.ivasiuk@gmail.com_results.csv")
| 25.325581 | 87 | 0.769513 |
b00272462aa831ed8359bfb1b05ac3991b3aef99 | 956 | py | Python | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | [
"MIT"
] | 7 | 2021-04-06T20:33:31.000Z | 2021-09-30T23:29:24.000Z | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | [
"MIT"
] | 23 | 2020-09-09T15:01:50.000Z | 2022-01-03T08:58:36.000Z | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | [
"MIT"
] | 2 | 2020-12-14T10:07:07.000Z | 2021-06-29T00:20:43.000Z | """Tests for the marion application fields"""
from marion.defaults import DocumentIssuerChoices
from ..fields import IssuerLazyChoiceField, LazyChoiceField
def test_fields_lazy_choice_field():
"""
LazyChoiceField class.
Choices instance attribute should not be customizable.
"""
field = LazyChoiceField(
name="lazy_choice_field",
choices=[("option1", "Option 1"), ("option2", "Option 2")],
max_length=200,
)
errors = field.check()
assert len(errors) == 0
assert field.choices == []
def test_fields_issuer_lazy_choice_field(settings):
"""
IssuerLazyChoiceField class.
Choices attribute relies on DOCUMENT_ISSUER_CHOICES_CLASS setting.
"""
settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (
"marion.defaults.DocumentIssuerChoices"
)
field = IssuerLazyChoiceField(name="issuer_lazy_choice_field")
assert field.choices == DocumentIssuerChoices.choices
| 26.555556 | 70 | 0.712343 |
b00495771d6a310aa5e5d77c1c05c91690f9a756 | 2,331 | py | Python | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | [
"MIT"
] | 1 | 2022-03-12T00:47:24.000Z | 2022-03-12T00:47:24.000Z | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | [
"MIT"
] | null | null | null | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | [
"MIT"
] | 1 | 2022-03-14T23:42:57.000Z | 2022-03-14T23:42:57.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 12:15:40 2022
@author: udaya
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 27 18:06:29 2022
@author: udaya
"""
import cv2
import numpy as np
from djitellopy import Tello
frameWidth = 640
frameHeight = 480
###############################
# CONNECT TO TELLO
# cap = cv2.VideoCapture(0)
# cap.set(3, frameWidth)
# cap.set(4, frameHeight)
myDrone = initializeTello()
cv2.namedWindow("HSV")
cv2.resizeWindow("HSV", 640, 240)
cv2.createTrackbar("HUE Min", "HSV", 0, 179, empty)
cv2.createTrackbar("HUE Max", "HSV", 179, 179, empty)
cv2.createTrackbar("SAT Min", "HSV", 0, 255, empty)
cv2.createTrackbar("SAT Max", "HSV", 255, 255, empty)
cv2.createTrackbar("VALUE Min", "HSV", 0, 255, empty)
cv2.createTrackbar("VALUE Max", "HSV", 255, 255, empty)
while True:
success, img = telloGetFrame(myDrone,frameWidth,frameHeight)
imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("HUE Min", "HSV")
h_max = cv2.getTrackbarPos("HUE Max", "HSV")
s_min = cv2.getTrackbarPos("SAT Min", "HSV")
s_max = cv2.getTrackbarPos("SAT Max", "HSV")
v_min = cv2.getTrackbarPos("VALUE Min", "HSV")
v_max = cv2.getTrackbarPos("VALUE Max", "HSV")
print(h_min)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHsv, lower, upper)
result = cv2.bitwise_and(img, img, mask=mask)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
hStack = np.hstack([img, mask, result])
cv2.imshow('Horizontal Stacking', hStack)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#cap.release()
cv2.destroyAllWindows() | 26.793103 | 81 | 0.632347 |
b0050cae1ff0c2350a07478cbaf2f32a1d466c54 | 16,101 | py | Python | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | [
"Apache-2.0"
] | null | null | null | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | [
"Apache-2.0"
] | null | null | null | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import configparser
import datetime
import logging
import os
import pathlib
from climetlab.scripts.tools import parse_args
from .str_utils import CamelCase, alphanum, camelCase, dashes, underscores
LOG = logging.getLogger(__name__)
# import climetlab.debug
APACHE_LICENCE = """This software is licensed under the terms of the Apache Licence Version 2.0
which can be obtained at http://www.apache.org/licenses/LICENSE-2.0."""
PREFIX_ECMWF_LICENCE = (
"""(C) Copyright {year} European Centre for Medium-Range Weather Forecasts."""
)
POSTFIX_ECMWF_LICENCE = """In applying this licence, ECMWF does not waive the privileges and immunities
granted to it by virtue of its status as an intergovernmental organisation
nor does it submit to any jurisdiction."""
class Transformer:
_help = ""
glob = None
TRANSFORMERS_CLASSES = {
"dataset": [
PluginNameTransformer,
DatasetNameTransformer,
FullNameTransformer,
EmailTransformer,
GithubUsernameTransformer,
RepoUrlTransformer,
LicenceTransformer,
],
"source": [
PluginNameTransformer,
SourceNameTransformer,
FullNameTransformer,
EmailTransformer,
GithubUsernameTransformer,
RepoUrlTransformer,
LicenceTransformer,
],
}
| 33.266529 | 143 | 0.629464 |
b0074893c2e7005340588db291b50134738031f4 | 3,044 | py | Python | openclean/util/core.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | [
"BSD-3-Clause"
] | 4 | 2021-04-20T09:06:26.000Z | 2021-11-20T20:31:28.000Z | openclean/util/core.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | [
"BSD-3-Clause"
] | 14 | 2021-01-19T19:23:16.000Z | 2021-04-28T14:31:03.000Z | openclean/util/core.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | [
"BSD-3-Clause"
] | 5 | 2021-08-24T11:57:21.000Z | 2022-03-17T04:39:04.000Z | # This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Collection of helper functions for various purpoeses."""
from typing import Optional
import uuid
def always_false(*args):
"""Predicate that always evaluates to False.
Parameters
----------
args: any
Variable list of arguments.
Returns
-------
bool
"""
return False
def is_list_or_tuple(value):
"""Test if a given value is a list or tuple that can be converted into
multiple arguments.
Parameters
----------
value: any
Any object that is tested for being a list or tuple.
Returns
-------
bool
"""
return isinstance(value, list) or isinstance(value, tuple)
def scalar_pass_through(value):
"""Pass-through method for single scalar values.
Parameters
----------
value: scalar
Scalar cell value from a data frame row.
Returns
-------
scalar
"""
return value
def tenary_pass_through(*args):
"""Pass-through method for a list of argument values.
Parameters
----------
args: list of scalar
List of argument values.
Returns
-------
scalar
"""
return args
def unique_identifier(length: Optional[int] = None) -> str:
"""Get an identifier string of given length. Uses UUID to generate a unique
string and return the requested number of characters from that string.
Parameters
----------
length: int, default=None
Number of characters in the returned string.
Returns
-------
string
"""
identifier = str(uuid.uuid4()).replace('-', '')
if length is not None:
identifier = identifier[:length]
return identifier
| 23.415385 | 79 | 0.617608 |
b0091d1b6caace04c666bba350b86f62370a21bc | 78 | py | Python | desafio1.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | [
"MIT"
] | null | null | null | desafio1.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | [
"MIT"
] | null | null | null | desafio1.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | [
"MIT"
] | null | null | null | nome= input('Qual seu nome ?: ')
print ('Ol {} Seja bem vindo'.format(nome))
| 26 | 44 | 0.641026 |
b00943e9be2f2f8a05e1b1e0bcce1f1c5bb49902 | 68 | py | Python | exquiro/parsers/openponk/__init__.py | xhusar2/conceptual_model_parser | 63eea4ab8b967a6d2ee612ffb4a06b93e97d0043 | [
"MIT"
] | null | null | null | exquiro/parsers/openponk/__init__.py | xhusar2/conceptual_model_parser | 63eea4ab8b967a6d2ee612ffb4a06b93e97d0043 | [
"MIT"
] | null | null | null | exquiro/parsers/openponk/__init__.py | xhusar2/conceptual_model_parser | 63eea4ab8b967a6d2ee612ffb4a06b93e97d0043 | [
"MIT"
] | null | null | null | from .openpondk_class_diagram_parser import OpenponkClsDiagramParser | 68 | 68 | 0.941176 |
b00a0ae9f8f71c5f857d2683a8d63e315db4a5e2 | 254 | py | Python | fastNLP/modules/encoder/__init__.py | awesome-archive/fastNLP | 767e7971e542783c0129ed88b7d871db775e653e | [
"Apache-2.0"
] | 4 | 2019-01-19T13:58:10.000Z | 2019-01-19T15:07:48.000Z | fastNLP/modules/encoder/__init__.py | TTTREE/fastNLP | ef82c1f10000752db32a5fa323668b94bcb940a1 | [
"Apache-2.0"
] | 1 | 2018-09-30T13:30:51.000Z | 2018-09-30T13:30:51.000Z | fastNLP/modules/encoder/__init__.py | TTTREE/fastNLP | ef82c1f10000752db32a5fa323668b94bcb940a1 | [
"Apache-2.0"
] | null | null | null | from .conv import Conv
from .conv_maxpool import ConvMaxpool
from .embedding import Embedding
from .linear import Linear
from .lstm import LSTM
__all__ = ["LSTM",
"Embedding",
"Linear",
"Conv",
"ConvMaxpool"]
| 21.166667 | 37 | 0.629921 |
b00bb16d432ae4e7eebbd1a8f438f11ad4838ec1 | 1,141 | py | Python | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | 9a44f80787231df386910c28f17bab465fee013d | [
"Apache-2.0"
] | 1 | 2019-01-24T19:07:08.000Z | 2019-01-24T19:07:08.000Z | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | 9a44f80787231df386910c28f17bab465fee013d | [
"Apache-2.0"
] | null | null | null | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | 9a44f80787231df386910c28f17bab465fee013d | [
"Apache-2.0"
] | null | null | null | #following tutorial: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
import numpy as np
import cv2
#there are more than 150 color-space conversions methods available in OpenCV
#why so many?
#gets all possible color space conversion flags
flags = [i for i in dir(cv2) if i.startswith("COLOR_")]
#print (flags)
#converts a bgr color to hsv
green = np.uint8([[[0,255,0]]])
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
print (hsv_green)
#extracts any blue colored object using the built in video camera
#can detect my blue eyes if I get close and widen them
cap = cv2.VideoCapture(0)
while(1):
#take each frame
_, frame = cap.read()
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
mask = cv2.inRange(hsv,lower_blue,upper_blue)
res = cv2.bitwise_and(frame,frame,mask=mask)
cv2.imshow("frame",frame)
cv2.imshow("mask",mask)
cv2.imshow("result",res)
k = cv2.waitKey(5)& 0xFF
if k == 27:
break
cv2.destroyAllWindows()
| 26.534884 | 158 | 0.718668 |
b00c4cc641fafb1dc25683af3562c4fd4137c48c | 1,724 | py | Python | sdcflows/utils/misc.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | [
"Apache-2.0"
] | 16 | 2020-02-25T17:47:10.000Z | 2022-03-07T02:54:51.000Z | sdcflows/utils/misc.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | [
"Apache-2.0"
] | 175 | 2020-02-15T00:52:28.000Z | 2022-03-29T21:42:31.000Z | sdcflows/utils/misc.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | [
"Apache-2.0"
] | 12 | 2019-05-28T23:34:37.000Z | 2020-01-22T21:32:22.000Z | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Basic miscellaneous utilities."""
def front(inlist):
"""
Pop from a list or tuple, otherwise return untouched.
Examples
--------
>>> front([1, 0])
1
>>> front("/path/somewhere")
'/path/somewhere'
"""
if isinstance(inlist, (list, tuple)):
return inlist[0]
return inlist
def last(inlist):
"""
Return the last element from a list or tuple, otherwise return untouched.
Examples
--------
>>> last([1, 0])
0
>>> last("/path/somewhere")
'/path/somewhere'
"""
if isinstance(inlist, (list, tuple)):
return inlist[-1]
return inlist
def get_free_mem():
"""Probe the free memory right now."""
try:
from psutil import virtual_memory
return round(virtual_memory().free, 1)
except Exception:
return None
| 24.628571 | 77 | 0.657773 |
b00d6bcbdc91daedbc8ff5cedd805b13268a4bca | 7,026 | py | Python | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | 04fb40948fcae55c379d8e878c41f281948155e8 | [
"Apache-2.0"
] | 2 | 2018-12-29T09:10:18.000Z | 2020-08-07T03:42:38.000Z | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | 04fb40948fcae55c379d8e878c41f281948155e8 | [
"Apache-2.0"
] | null | null | null | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | 04fb40948fcae55c379d8e878c41f281948155e8 | [
"Apache-2.0"
] | 3 | 2018-12-29T09:10:21.000Z | 2021-05-23T06:30:35.000Z | # -*- coding: UTF-8 -*-
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
import cv2
from sklearn.model_selection import train_test_split
import matplotlib
from keras.utils import np_utils
from keras.optimizers import *
from keras.preprocessing.image import ImageDataGenerator
from fashionAI.config import config
from fashionAI.Utils.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from fashionAI.Utils.preprocessing.simplepreprocessor import SimplePreprocessor
from fashionAI.Utils.preprocessing.meanpreprocessor import MeanPreprocessor
from fashionAI.Utils.preprocessing.patchpreprocessor import PatchPreprocessor
from fashionAI.Utils.preprocessing.croppreprocessor import CropPreprocessor
from fashionAI.callbacks.trainingmonitor import TrainingMonitor
from fashionAI.Utils.io.datagenerator import DataGenerator
from fashionAI.nn.inceptionresnet_v2 import InceptionResnetV2
| 39.033333 | 117 | 0.646883 |
b00f67fa0503dd85f3c8d37c378d2f72c7f066bd | 700 | py | Python | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_QA.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_QA.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_QA.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | 1 | 2020-09-08T14:45:34.000Z | 2020-09-08T14:45:34.000Z | """Auto-generated file, do not edit by hand. QA metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_QA = PhoneMetadata(id='QA', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[129]\\d{2,4}', possible_length=(3, 4, 5)),
toll_free=PhoneNumberDesc(national_number_pattern='999', example_number='999', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='999', example_number='999', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='(?:1|20)\\d\\d|9(?:[27]\\d{3}|99)', example_number='100', possible_length=(3, 4, 5)),
short_data=True)
| 70 | 141 | 0.754286 |
b00f7bd4e39ef2e25f158e39f9604eb34518aa71 | 815 | py | Python | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | d8cf7bc9d85e75cc3248a35d8abdfd24d76276cd | [
"MIT"
] | null | null | null | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | d8cf7bc9d85e75cc3248a35d8abdfd24d76276cd | [
"MIT"
] | null | null | null | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | d8cf7bc9d85e75cc3248a35d8abdfd24d76276cd | [
"MIT"
] | null | null | null | """
Organizing test and parametrizing
"""
# Parametrized tests: Run many tests in one
# pylint: disable=W0622
# pylint: disable=R0201
# pylint: disable=R0903
import pytest
from word_counter import count_words
| 27.166667 | 59 | 0.586503 |
b0110b071338ec4840e5427dcade83815657e854 | 1,685 | py | Python | src/dep_appearances/cli.py | jdlubrano/dep-appearances | bf752b469463ee8cb7351df37231d250be3bcf47 | [
"MIT"
] | null | null | null | src/dep_appearances/cli.py | jdlubrano/dep-appearances | bf752b469463ee8cb7351df37231d250be3bcf47 | [
"MIT"
] | null | null | null | src/dep_appearances/cli.py | jdlubrano/dep-appearances | bf752b469463ee8cb7351df37231d250be3bcf47 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import os
import pdb
import sys
from dep_appearances.appearances_report import AppearancesReport
if __name__ == "__main__":
main()
| 30.089286 | 108 | 0.668249 |
b01166da273e45dbd1d37d892c58fe4b13c2a3e7 | 250 | py | Python | kernel/filters.py | pycodi/django-kernel | 87829a0d47d04a3bb3d5c7cb79a03f0772dfdf46 | [
"MIT"
] | 1 | 2016-09-16T11:40:45.000Z | 2016-09-16T11:40:45.000Z | kernel/filters.py | pycodi/django-kernel | 87829a0d47d04a3bb3d5c7cb79a03f0772dfdf46 | [
"MIT"
] | null | null | null | kernel/filters.py | pycodi/django-kernel | 87829a0d47d04a3bb3d5c7cb79a03f0772dfdf46 | [
"MIT"
] | null | null | null | from django_filters import Filter
from django_filters.fields import Lookup
| 31.25 | 75 | 0.716 |
b0120808c75c26295ac6097ea109b68947111348 | 323 | py | Python | tests/expr/expr08.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 83 | 2015-11-30T09:59:13.000Z | 2021-08-03T09:12:28.000Z | tests/expr/expr08.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 4 | 2017-02-10T01:43:11.000Z | 2020-07-14T03:52:25.000Z | tests/expr/expr08.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 11 | 2016-11-18T14:39:15.000Z | 2021-02-23T10:05:20.000Z | from polyphony import testbench
test()
| 21.533333 | 36 | 0.575851 |
b0134690af47b5e16baf709ce4dca459913ce34e | 1,175 | py | Python | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | 7592c2029242fca24245ee1c34b2b9f6043070d1 | [
"MIT"
] | null | null | null | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | 7592c2029242fca24245ee1c34b2b9f6043070d1 | [
"MIT"
] | null | null | null | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | 7592c2029242fca24245ee1c34b2b9f6043070d1 | [
"MIT"
] | null | null | null | from pyfirmata import Arduino, util
from time import sleep
import pymysql
PORT = "COM4"
uno = Arduino(PORT)
sleep(5)
it = util.Iterator(uno)
it.start()
a4 = uno.get_pin('a:4:i')
a5 = uno.get_pin('a:5:i')
db = pymysql.connect("120.110.114.14", "hanshin", "Hanshin519", "Student", port = 3306)
cursor = db.cursor()
print("Arduino start~")
try:
while True:
gas = a4.read()
tmp = a5.read()
try:
gasValue = round(gas * 1024)
Vout = arduino_map(tmp, 0, 1, 0, 5)
tmpValue = round((((Vout * 1000) - 500) / 10) , 2)
#tmpValue = ((round(tmp * 1024)) * (5.0/1024) -0.5) / 0.01
sleep(5)
except TypeError:
pass
print('{0} {1}'.format(gasValue, tmpValue))
sql = "update Student.articles_envdata set tmpValue = {1}, gasValue = {0} where data_id = 1".format(gasValue, tmpValue)
cursor.execute(sql)
db.commit()
print("Update Success~")
sleep(5)
except Exception as e:
db.rollback()
print("Error!:{0}".format(e))
except KeyboardInterrupt:
uno.exit()
| 23.5 | 124 | 0.612766 |
b01440159aa9a67d2eac6230f37afcedb41016ba | 303 | py | Python | app/views.py | kobrient/tinypilot | aa40f11a370e04b11e0f72d34647c0e01669bbe9 | [
"MIT"
] | null | null | null | app/views.py | kobrient/tinypilot | aa40f11a370e04b11e0f72d34647c0e01669bbe9 | [
"MIT"
] | null | null | null | app/views.py | kobrient/tinypilot | aa40f11a370e04b11e0f72d34647c0e01669bbe9 | [
"MIT"
] | null | null | null | import flask
from find_files import find as find_files
views_blueprint = flask.Blueprint('views', __name__, url_prefix='')
| 25.25 | 79 | 0.752475 |
b0144723fdb455462aff667b476dc0e86c2e8039 | 577 | py | Python | example.py | LAIRLAB/libpyarr | 9e973a4045519fa6aedae3aaabd8267f6f796a8c | [
"BSD-3-Clause"
] | 1 | 2016-04-09T02:37:03.000Z | 2016-04-09T02:37:03.000Z | example.py | LAIRLAB/libpyarr | 9e973a4045519fa6aedae3aaabd8267f6f796a8c | [
"BSD-3-Clause"
] | null | null | null | example.py | LAIRLAB/libpyarr | 9e973a4045519fa6aedae3aaabd8267f6f796a8c | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
import warnings, numpy
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from libpyarr_example import *
if __name__=='__main__':
main()
| 28.85 | 83 | 0.694974 |
b0146dc56f96a9ee8522dfa5aeb06d9a9ea59827 | 1,167 | py | Python | kitty_tiny/tools/annoGen/AnnoEventHandler.py | sixxchung/mymm | 4e8cd43c2615c08a60bf21fe0c4604344b470602 | [
"MIT"
] | null | null | null | kitty_tiny/tools/annoGen/AnnoEventHandler.py | sixxchung/mymm | 4e8cd43c2615c08a60bf21fe0c4604344b470602 | [
"MIT"
] | null | null | null | kitty_tiny/tools/annoGen/AnnoEventHandler.py | sixxchung/mymm | 4e8cd43c2615c08a60bf21fe0c4604344b470602 | [
"MIT"
] | null | null | null |
import logging
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
| 31.540541 | 88 | 0.640103 |
b01504199a00f0b0ea4a2e7806f9a6775f0b35bb | 11,037 | py | Python | BCPNN/backend/_cpu_base_backend.py | KTH-HPC/StreamBrain | 37b16e7c8e02e6d2800bcf89630a0f4419e90cd4 | [
"BSD-2-Clause"
] | 4 | 2020-10-20T22:15:25.000Z | 2022-02-10T10:25:24.000Z | BCPNN/backend/_cpu_base_backend.py | KTH-HPC/StreamBrain | 37b16e7c8e02e6d2800bcf89630a0f4419e90cd4 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T10:46:50.000Z | 2020-12-16T10:46:50.000Z | BCPNN/backend/_cpu_base_backend.py | KTH-HPC/StreamBrain | 37b16e7c8e02e6d2800bcf89630a0f4419e90cd4 | [
"BSD-2-Clause"
] | 1 | 2020-10-20T22:15:29.000Z | 2020-10-20T22:15:29.000Z | import sys
import numpy as np
from tqdm import tqdm
from contextlib import nullcontext
| 35.038095 | 94 | 0.552596 |
b01639c2289f47ba698eea2092678bb22c032e75 | 6,879 | py | Python | flux_sensors/flux_sensor.py | Flux-Coordinator/flux-sensors | 44968c95e277023c3a6777d653e7b3cb4e333923 | [
"MIT"
] | null | null | null | flux_sensors/flux_sensor.py | Flux-Coordinator/flux-sensors | 44968c95e277023c3a6777d653e7b3cb4e333923 | [
"MIT"
] | 1 | 2018-06-14T18:21:33.000Z | 2018-06-14T18:21:33.000Z | flux_sensors/flux_sensor.py | Flux-Coordinator/flux-sensors | 44968c95e277023c3a6777d653e7b3cb4e333923 | [
"MIT"
] | null | null | null | from flux_sensors.localizer.localizer import Localizer, Coordinates, LocalizerError, PozyxDeviceError
from flux_sensors.light_sensor.light_sensor import LightSensor
from flux_sensors.config_loader import ConfigLoader
from flux_sensors.flux_server import FluxServer, FluxServerError
from flux_sensors.models import models
import time
import requests
import json
import logging
logger = logging.getLogger(__name__)
| 44.668831 | 118 | 0.602704 |
b019647d7984c42bcd98ff6521f630e19b83c858 | 11,288 | py | Python | Network.py | Coldog2333/pytoflow | 3cec913fa5a2ddb8133a075d4ff177cceb74f06a | [
"MIT"
] | 102 | 2018-12-29T16:19:18.000Z | 2022-01-13T03:54:04.000Z | Network.py | mengxiangyudlut/pytoflow | 3cec913fa5a2ddb8133a075d4ff177cceb74f06a | [
"MIT"
] | 19 | 2019-04-26T10:19:14.000Z | 2021-11-14T07:36:23.000Z | Network.py | mengxiangyudlut/pytoflow | 3cec913fa5a2ddb8133a075d4ff177cceb74f06a | [
"MIT"
] | 32 | 2019-03-04T00:10:06.000Z | 2022-01-11T08:19:19.000Z | import math
import torch
# import torch.utils.serialization # it was removed in torch v1.0.0 or higher version.
arguments_strModel = 'sintel-final'
SpyNet_model_dir = './models' # The directory of SpyNet's weights
Backward_tensorGrid = {}
# end
| 47.230126 | 186 | 0.589033 |
b01bbd168b9b732e58f788ff84aca342f6b50515 | 2,668 | py | Python | storagetest/pkgs/ltp/acl/acl_test.py | liufeng-elva/storage-test2 | 5364cc00dbe71b106f1bb740bf391e6124788bf4 | [
"MIT"
] | null | null | null | storagetest/pkgs/ltp/acl/acl_test.py | liufeng-elva/storage-test2 | 5364cc00dbe71b106f1bb740bf391e6124788bf4 | [
"MIT"
] | null | null | null | storagetest/pkgs/ltp/acl/acl_test.py | liufeng-elva/storage-test2 | 5364cc00dbe71b106f1bb740bf391e6124788bf4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : acl_test.py
@Time : 2020/11/9 9:25
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
import os
import unittest
from storagetest.libs import utils
from storagetest.libs.log import log
from storagetest.libs.exceptions import PlatformError, NoSuchDir, NoSuchBinary
logger = log.get_logger()
cur_dir = os.path.dirname(os.path.realpath(__file__))
bin_path = os.path.join(cur_dir, 'bin')
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(UnitTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| 29.644444 | 92 | 0.613943 |
b01ead4c68269eedb233e679f59c48eb110ed041 | 1,518 | py | Python | recipes-bsp/b205/files/spihost_write_ftdi.py | tszucs/meta-ublox-tk1 | 8cb7c83d9a8b387fae4a4108a48e697d3e94df8e | [
"MIT"
] | null | null | null | recipes-bsp/b205/files/spihost_write_ftdi.py | tszucs/meta-ublox-tk1 | 8cb7c83d9a8b387fae4a4108a48e697d3e94df8e | [
"MIT"
] | null | null | null | recipes-bsp/b205/files/spihost_write_ftdi.py | tszucs/meta-ublox-tk1 | 8cb7c83d9a8b387fae4a4108a48e697d3e94df8e | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys, getopt, os, time, array
from pyftdi.spi import SpiController
if __name__ == "__main__":
main (sys.argv[1:])
| 24.483871 | 147 | 0.675889 |
b01eb11332b52c82d114e9890278450ea72e51d6 | 3,845 | py | Python | PedestrianSlayer/MechanicalControl/NeuralNetwork.py | Viriliter/PedestrianSlayer | 4adbdc3d0ed60408e6422cdba01f017701d05069 | [
"MIT"
] | 2 | 2018-05-23T14:14:23.000Z | 2018-12-03T21:08:37.000Z | PedestrianSlayer/MechanicalControl/NeuralNetwork.py | Viriliter/PedestrianSlayer | 4adbdc3d0ed60408e6422cdba01f017701d05069 | [
"MIT"
] | null | null | null | PedestrianSlayer/MechanicalControl/NeuralNetwork.py | Viriliter/PedestrianSlayer | 4adbdc3d0ed60408e6422cdba01f017701d05069 | [
"MIT"
] | null | null | null | import numpy as np
| 34.954545 | 116 | 0.635111 |
b01f92f5f3f6a4f80aa7644a0330cdac5e27b92c | 1,405 | py | Python | tests/test_paramviewer.py | lnielsen/pyhf | 3d98dc445c384d2919a77b9af0a202e12343a707 | [
"Apache-2.0"
] | null | null | null | tests/test_paramviewer.py | lnielsen/pyhf | 3d98dc445c384d2919a77b9af0a202e12343a707 | [
"Apache-2.0"
] | null | null | null | tests/test_paramviewer.py | lnielsen/pyhf | 3d98dc445c384d2919a77b9af0a202e12343a707 | [
"Apache-2.0"
] | null | null | null | import pyhf
from pyhf.parameters import ParamViewer
| 31.222222 | 86 | 0.605694 |
b0204523055a99ef60f353c69bef13df582957e8 | 15,276 | py | Python | library/modules/encoder_decoders/sequence_to_sequence.py | dangitstam/le-traducteur | 499005ac198029fd2a7e7469fb250b8b3af6a619 | [
"Apache-2.0"
] | 6 | 2018-10-23T10:05:55.000Z | 2020-08-30T13:04:51.000Z | library/modules/encoder_decoders/sequence_to_sequence.py | dangitstam/le-traducteur | 499005ac198029fd2a7e7469fb250b8b3af6a619 | [
"Apache-2.0"
] | 1 | 2018-08-20T21:58:33.000Z | 2020-12-29T17:44:04.000Z | library/modules/encoder_decoders/sequence_to_sequence.py | dangitstam/le-traducteur | 499005ac198029fd2a7e7469fb250b8b3af6a619 | [
"Apache-2.0"
] | 1 | 2022-03-26T05:13:38.000Z | 2022-03-26T05:13:38.000Z | from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.attention import BilinearAttention
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, RegularizerApplicator, util
from overrides import overrides
# This is largely based on AllenNLP's general Seq2Seq encoder-decoder:
# https://github.com/allenai/allennlp/blob/master/allennlp/models/encoder_decoders/simple_seq2seq.py
#
# but offers more flexibility. Maybe I'll subclass this module when they've addressed their TODOs.
# TODO: Add more asserts so people don't do dumb shit
# TODO: Better docstrings.
| 48.805112 | 106 | 0.645719 |
b021c9112da0b09c0383564d4213787ef0cf3187 | 1,372 | py | Python | hrv/filters.py | LegrandNico/hrv | 35cdd1b7ddf8afdebf2db91f982b256c3b9dbf67 | [
"BSD-3-Clause"
] | 1 | 2020-01-06T20:08:04.000Z | 2020-01-06T20:08:04.000Z | hrv/filters.py | LegrandNico/hrv | 35cdd1b7ddf8afdebf2db91f982b256c3b9dbf67 | [
"BSD-3-Clause"
] | null | null | null | hrv/filters.py | LegrandNico/hrv | 35cdd1b7ddf8afdebf2db91f982b256c3b9dbf67 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from hrv.rri import RRi
from hrv.utils import _create_time_info
| 25.886792 | 79 | 0.626093 |
b02365bd68f389ec1ac4453e0ddfb053b1f457d4 | 20,428 | py | Python | PVPlugins/PVGeo_UBC_Tools.py | jkulesza/PVGeo | c7bdbad5e5e5579033e1b00605d680b67252b3f4 | [
"BSD-3-Clause"
] | 1 | 2020-06-09T16:49:28.000Z | 2020-06-09T16:49:28.000Z | PVPlugins/PVGeo_UBC_Tools.py | jkulesza/PVGeo | c7bdbad5e5e5579033e1b00605d680b67252b3f4 | [
"BSD-3-Clause"
] | null | null | null | PVPlugins/PVGeo_UBC_Tools.py | jkulesza/PVGeo | c7bdbad5e5e5579033e1b00605d680b67252b3f4 | [
"BSD-3-Clause"
] | null | null | null | paraview_plugin_version = '1.1.39'
# This is module to import. It provides VTKPythonAlgorithmBase, the base class
# for all python-based vtkAlgorithm subclasses in VTK and decorators used to
# 'register' the algorithm with ParaView along with information about UI.
from paraview.util.vtkAlgorithm import *
# Helpers:
from PVGeo import _helpers
# Classes to Decorate
from PVGeo.ubc import *
#### GLOBAL VARIABLES ####
MENU_CAT = 'PVGeo: UBC Mesh Tools'
#------------------------------------------------------------------------------
# Read OcTree Mesh
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Write Tensor Mesh
#------------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
| 42.558333 | 221 | 0.684012 |
b023ba4b1780ce639f98fb2247c460ffe792c1f6 | 20,333 | py | Python | tests/rewards_tree/test_rewards_flow.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 99 | 2020-12-02T08:40:48.000Z | 2022-03-15T05:21:06.000Z | tests/rewards_tree/test_rewards_flow.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 115 | 2020-12-15T07:15:39.000Z | 2022-03-28T22:21:03.000Z | tests/rewards_tree/test_rewards_flow.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 56 | 2020-12-11T06:50:04.000Z | 2022-02-21T09:17:38.000Z | import json
import secrets
import brownie
from dotmap import DotMap
import pytest
import pprint
from brownie import *
from helpers.constants import *
from helpers.registry import registry
from rich.console import Console
FARM_ADDRESS = "0xa0246c9032bC3A600820415aE600c6388619A14D"
XSUSHI_ADDRESS = "0x8798249c2E607446EfB7Ad49eC89dD1865Ff4272"
SECS_PER_HOUR = 3600
SECS_PER_DAY = 86400
console = Console()
# @pytest.fixture(scope="function")
# def setup_badger(badger_tree_unit):
# return badger_tree_unit
def random_32_bytes():
return "0x" + secrets.token_hex(32)
# generates merkle root purely off dummy data
# @pytest.mark.skip()
| 31.137825 | 119 | 0.59278 |
b0249f5db53b2ce54527df608f97d99c1010a240 | 23,869 | py | Python | HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py | Borja-Perez-Diaz/HII-CHI-Mistry | d0dafc753c63246bf14b77807a885ddc7bd4bb99 | [
"MIT"
] | null | null | null | HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py | Borja-Perez-Diaz/HII-CHI-Mistry | d0dafc753c63246bf14b77807a885ddc7bd4bb99 | [
"MIT"
] | null | null | null | HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py | Borja-Perez-Diaz/HII-CHI-Mistry | d0dafc753c63246bf14b77807a885ddc7bd4bb99 | [
"MIT"
] | null | null | null | # Filename: HCm_UV_v4.11.py
import string
import numpy as np
import sys
#sys.stderr = open('errorlog.txt', 'w')
#Function for interpolation of grids
print (' ---------------------------------------------------------------------')
print (' This is HII-CHI-mistry for UV version 4.11')
print (' See Perez-Montero, & Amorin (2017) for details')
print ( ' Insert the name of your input text file with some or all of the following columns:')
print (' Lya 1216, CIV 1549, HeII 1640, OIII 1665, CIII 1909, Hb 4861, OIII 5007')
print ('in arbitrary units and reddening corrected. Each column must be given')
print ('with labels and followed by its corresponding flux error.')
print ('---------------------------------------------------------------------')
# Input file reading
if len(sys.argv) == 1:
if int(sys.version[0]) < 3:
input00 = raw_input('Insert input file name:')
else:
input00 = input('Insert input file name:')
else:
input00 = str(sys.argv[1])
try:
input0 = np.genfromtxt(input00,dtype=None,names=True, encoding = 'ascii')
print ('The input file is:'+input00)
except:
print ('Input file error: It does not exist or has wrong format')
sys.exit
print ('')
if input0.size == 1:
input1 = np.stack((input0,input0))
else:
input1 = input0
# Iterations for Montecarlo error derivation
if len(sys.argv) < 3:
n = 25
else:
n = int(sys.argv[2])
print ('The number of iterations for MonteCarlo simulation is: ',n)
print ('')
# Reading of models grids. These can be changed
print ('')
question = True
while question:
print('-------------------------------------------------')
print ('(1) POPSTAR with Chabrier IMF, age = 1 Myr')
print ('(2) BPASS v.2.1 a_IMF = 1.35, Mup = 300, age = 1Myr')
print('-------------------------------------------------')
if int(sys.version[0]) < 3:
sed = raw_input('Choose SED of the models:')
else:
sed = input('Choose SED of the models:')
if sed == '1' or sed == '2' : question = False
print ('')
question = True
while question:
if int(sys.version[0]) < 3:
inter = raw_input('Choose models [0] No interpolated [1] Interpolated: ')
else:
inter = input('Choose models [0] No interpolated [1] Interpolated: ')
if inter == '0' or inter == '1': question = False
print ('')
sed = int(sed)
inter = int(inter)
if sed==1 :
grid1 = np.loadtxt('C17_popstar_uv_v4.0.dat')
grid2 = np.loadtxt('C17_popstar_logU_adapted_emp_uv_v4.0.dat')
grid3 = np.loadtxt('C17_popstar_logU-CO_adapted_emp_uv_v4.0.dat')
if inter == 0:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. No interpolation'
print ('No interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O')
res_CO = 0.125
elif inter == 1:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF interpolated'
print ('Interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O')
res_CO = 0.125
elif sed==2:
grid1 = np.loadtxt('C17_bpass_uv_v4.1.dat')
grid2 = np.loadtxt('C17_bpass_logU_adapted_emp_uv_v4.1.dat')
grid3 = np.loadtxt('C17_bpass_logU-CO_adapted_emp_uv_v4.1.dat')
if inter == 0:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr. No interpolation'
print ('No interpolation for theBPASS models is going to be used.')
print ('The grid has a resolution of 0.1 dex for O/H and 0.125 dex for N/O')
res_CO = 0.125
elif inter == 1:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr interpolated'
print ('Interpolation for theBPASS models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for N/O')
res_CO = 0.125
grids = []
OHffs = []
eOHffs = []
COffs = []
eCOffs = []
logUffs = []
elogUffs = []
Label_ID = False
Label_Lya = False
Label_eLya = False
Label_CIV = False
Label_eCIV = False
Label_HeII = False
Label_eHeII = False
Label_OIII_1665 = False
Label_eOIII_1665 = False
Label_CIII = False
Label_eCIII = False
Label_OIII_5007 = False
Label_eOIII_5007 = False
Label_Hbeta = False
Label_eHbeta = False
for col in range(0,len(input1.dtype.names),1):
if input1.dtype.names[col] == 'ID':
Label_ID = True
if input1.dtype.names[col] == 'Lya_1216':
Label_Lya = True
if input1.dtype.names[col] == 'eLya_1216':
Label_eLya = True
if input1.dtype.names[col] == 'CIV_1549':
Label_CIV = True
if input1.dtype.names[col] == 'eCIV_1549':
Label_eCIV = True
if input1.dtype.names[col] == 'HeII_1640':
Label_HeII = True
if input1.dtype.names[col] == 'eHeII_1640':
Label_eHeII = True
if input1.dtype.names[col] == 'OIII_1665':
Label_OIII_1665 = True
if input1.dtype.names[col] == 'eOIII_1665':
Label_eOIII_1665 = True
if input1.dtype.names[col] == 'CIII_1909':
Label_CIII = True
if input1.dtype.names[col] == 'eCIII_1909':
Label_eCIII = True
if input1.dtype.names[col] == 'Hb_4861':
Label_Hbeta = True
if input1.dtype.names[col] == 'eHb_4861':
Label_eHbeta = True
if input1.dtype.names[col] == 'OIII_5007':
Label_OIII_5007 = True
if input1.dtype.names[col] == 'eOIII_5007':
Label_eOIII_5007 = True
# Extract each column from the input table, substituting zeros ("not
# observed") when a column is absent.  Object IDs default to 1..N.
if Label_ID == False:
    Names = np.arange(1,input1.size+1,1)
else:
    Names = input1['ID']
if Label_Lya == False:
    Lya_1216 = np.zeros(input1.size)
else:
    Lya_1216 = input1['Lya_1216']
if Label_eLya == False:
    eLya_1216 = np.zeros(input1.size)
else:
    eLya_1216 = input1['eLya_1216']
if Label_CIV == False:
    CIV_1549 = np.zeros(input1.size)
else:
    CIV_1549 = input1['CIV_1549']
if Label_eCIV == False:
    eCIV_1549 = np.zeros(input1.size)
else:
    eCIV_1549 = input1['eCIV_1549']
if Label_HeII == False:
    HeII_1640 = np.zeros(input1.size)
else:
    HeII_1640 = input1['HeII_1640']
if Label_eHeII == False:
    eHeII_1640 = np.zeros(input1.size)
else:
    eHeII_1640 = input1['eHeII_1640']
if Label_OIII_1665 == False:
    OIII_1665 = np.zeros(input1.size)
else:
    OIII_1665 = input1['OIII_1665']
if Label_eOIII_1665 == False:
    eOIII_1665 = np.zeros(input1.size)
else:
    eOIII_1665 = input1['eOIII_1665']
if Label_CIII == False:
    CIII_1909 = np.zeros(input1.size)
else:
    CIII_1909 = input1['CIII_1909']
if Label_eCIII == False:
    eCIII_1909 = np.zeros(input1.size)
else:
    eCIII_1909 = input1['eCIII_1909']
if Label_Hbeta == False:
    # NOTE(review): uses len(input1) while every other branch uses
    # input1.size — equal for 1-D arrays, but len() fails on a 0-d
    # record; confirm whether input1 can be 0-d here.
    Hb_4861 = np.zeros(len(input1))
else:
    Hb_4861 = input1['Hb_4861']
if Label_eHbeta == False:
    eHb_4861 = np.zeros(input1.size)
else:
    eHb_4861 = input1['eHb_4861']
if Label_OIII_5007 == False:
    OIII_5007 = np.zeros(input1.size)
else:
    OIII_5007 = input1['OIII_5007']
if Label_eOIII_5007 == False:
    eOIII_5007 = np.zeros(input1.size)
else:
    eOIII_5007 = input1['eOIII_5007']
# Structured output table: one row per object holding the input fluxes, the
# grid used, and the derived 12+log(O/H), log(C/O) and log(U) with errors.
output = np.zeros(input1.size, dtype=[('ID', 'U12'), ('Lya_1216', float),('eLya_1216', float),('CIV_1549', float),('eCIV_1549', float),('HeII_1640', float),('eHeII_1640', float),('OIII_1665', float),('eOIII_1665', float),('CIII_1909', float),('eCIII_1909', float),('Hb_4861', float),('eHb_4861', float),('OIII_5007', float),('eOIII_5007', float),('grid', int),('OH', float),('eOH', float),('CO', float),('eCO', float),('logU', float),('elogU', float)] )
# Copy the input fluxes into the output table.
output['ID'] = Names
output['Lya_1216'] = Lya_1216
output['eLya_1216'] = eLya_1216
output['CIV_1549'] = CIV_1549
output['eCIV_1549'] = eCIV_1549
output['HeII_1640'] = HeII_1640
output['eHeII_1640'] = eHeII_1640
output['OIII_1665'] = OIII_1665
output['eOIII_1665'] = eOIII_1665
output['CIII_1909'] = CIII_1909
output['eCIII_1909'] = eCIII_1909
output['Hb_4861'] = Hb_4861
output['eHb_4861'] = eHb_4861
output['OIII_5007'] = OIII_5007
output['eOIII_5007'] = eOIII_5007
# Console header for the per-object progress lines printed inside the loop.
print ('Reading grids ....')
print ('')
print ('')
print ('----------------------------------------------------------------')
print ('(%) ID Grid 12+log(O/H) log(C/O) log(U)')
print ('-----------------------------------------------------------------')
# Beginning of loop of calculation
count = 0   # number of objects processed (drives the percentage readout)
# Main Monte Carlo loop.  For every object, draw `n` Gaussian-perturbed
# realisations of the observed fluxes, fit each realisation against the
# selected model grid via chi-squared weighted averages, and accumulate the
# per-realisation estimates of 12+log(O/H), log(C/O) and log(U).
# Grid row convention (raw grids): index[0]=12+log(O/H), index[1]=log(C/O),
# index[2]=log(U), index[3..8]=predicted line fluxes/ratios.
for tab in range(0,len(input1),1):
    count = count + 1
    # Per-object accumulators across the Monte Carlo realisations.
    OH_mc = []
    CO_mc = []
    logU_mc = []
    OHe_mc = []
    COe_mc = []
    logUe_mc = []
    for monte in range(0,n,1):
        # Chi-squared weighted sums (numerators) and weights (denominators)
        # for the value estimates and for their error estimates.
        OH_p = 0
        logU_p = 0
        CO_p = 0
        den_OH = 0
        den_CO = 0
        OH_e = 0
        CO_e = 0
        logU_e = 0
        den_OH_e = 0
        den_CO_e = 0
        tol_max = 1e2   # chi-squared penalty assigned to undefined model ratios
        # Perturb each observed flux with a Gaussian of width equal to its
        # error (+1e-5 to avoid a zero sigma); resample until the perturbed
        # value is positive.  A flux of zero means "not observed".
        Lya_1216_obs = 0
        if Lya_1216[tab] == 0:
            Lya_1216_obs = 0
        else:
            while Lya_1216_obs <= 0:
                Lya_1216_obs = np.random.normal(Lya_1216[tab],eLya_1216[tab]+1e-5)
        CIV_1549_obs = 0
        if CIV_1549[tab] == 0:
            CIV_1549_obs = 0
        else:
            while CIV_1549_obs <= 0:
                CIV_1549_obs = np.random.normal(CIV_1549[tab],eCIV_1549[tab]+1e-5)
        HeII_1640_obs = 0
        if HeII_1640[tab] == 0:
            HeII_1640_obs = 0
        else:
            # BUG FIX: this branch used `if` while every other line uses
            # `while`, so a single negative draw left a negative HeII flux.
            # Resample until positive, consistently with the other lines.
            while HeII_1640_obs <= 0:
                HeII_1640_obs = np.random.normal(HeII_1640[tab],eHeII_1640[tab]+1e-5)
        OIII_1665_obs = 0
        if OIII_1665[tab] == 0:
            OIII_1665_obs = 0
        else:
            while OIII_1665_obs <= 0:
                OIII_1665_obs = np.random.normal(OIII_1665[tab],eOIII_1665[tab]+1e-5)
        CIII_1909_obs = 0
        if CIII_1909[tab] == 0:
            CIII_1909_obs = 0
        else:
            while CIII_1909_obs <= 0:
                CIII_1909_obs = np.random.normal(CIII_1909[tab],eCIII_1909[tab]+1e-5)
        Hb_4861_obs = 0
        if Hb_4861[tab] == 0:
            Hb_4861_obs = 0
        else:
            while Hb_4861_obs <= 0:
                Hb_4861_obs = np.random.normal(Hb_4861[tab],eHb_4861[tab]+1e-5)
        OIII_5007_obs = 0
        if OIII_5007[tab] == 0:
            OIII_5007_obs = 0
        else:
            while OIII_5007_obs <= 0:
                OIII_5007_obs = np.random.normal(OIII_5007[tab],eOIII_5007[tab]+1e-5)
        # Observational diagnostic ratios; 0 (or -10 for the log ratio)
        # marks a ratio that cannot be formed from the available lines.
        if OIII_1665_obs == 0 or OIII_5007_obs == 0:
            ROIII_obs = 0
        else:
            ROIII_obs = OIII_5007_obs/OIII_1665_obs
        if Lya_1216_obs == 0 or CIII_1909_obs == 0:
            C34_obs = 0
        else:
            C34_obs = (CIII_1909_obs + CIV_1549_obs) / (Lya_1216_obs)
        if HeII_1640_obs == 0 or CIII_1909_obs == 0:
            C34He2_obs = 0
        else:
            C34He2_obs = (CIII_1909_obs + CIV_1549_obs) / (HeII_1640_obs)
        if CIII_1909_obs == 0 or OIII_1665_obs == 0:
            C3O3_obs = -10
        else:
            C3O3_obs = np.log10((CIII_1909_obs) / (OIII_1665_obs))
        if CIII_1909_obs == 0 or CIV_1549_obs == 0:
            C3C4_obs = 0
        else:
            C3C4_obs = (CIII_1909_obs/CIV_1549_obs)
        if CIII_1909_obs == 0 or Hb_4861_obs == 0:
            C34Hb_obs = 0
        else:
            C34Hb_obs = (CIII_1909_obs + CIV_1549_obs) / Hb_4861_obs
        # Selection of grid: pick the library matching which lines were
        # observed; record the choice once, on the last realisation.
        if OIII_1665[tab] > 0 and OIII_5007[tab] > 0:
            grid = grid1
            if monte == n-1: grids.append(1)
            grid_type = 1
        elif OIII_1665[tab] > 0 and CIII_1909[tab] > 0:
            grid = grid2
            if monte == n-1: grids.append(2)
            grid_type = 2
        else:
            grid = grid3
            if monte == n-1: grids.append(3)
            grid_type = 3
        # Calculation of C/O: chi-squared weighted mean of log(C/O) over the
        # grid, constrained by R(OIII) and the C3/O3 ratio.
        if C3O3_obs == -10:
            CO = -10
        else:
            CHI_ROIII = 0
            CHI_C3O3 = 0
            CHI_CO = 0
            for index in grid:
                if ROIII_obs == 0:
                    CHI_ROIII = 0
                elif index[6] == 0 or index[8] == 0:
                    CHI_ROIII = tol_max
                else:
                    CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
                if C3O3_obs == -10:
                    CHI_C3O3 = 0
                elif index[7] == 0 or index[6] == 0:
                    CHI_C3O3 = tol_max
                else:
                    CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
                CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
                if CHI_CO == 0:
                    CO_p = CO_p
                    den_CO = den_CO
                else:
                    CO_p = index[1] /np.exp(CHI_CO) + CO_p
                    den_CO = 1 / np.exp(CHI_CO) + den_CO
            CO = CO_p / den_CO
        # Calculation of C/O error: weighted variance about the C/O estimate.
        if C3O3_obs == -10:
            eCO = 0
        else:
            CHI_ROIII = 0
            CHI_C3O3 = 0
            CHI_CO = 0
            for index in grid:
                if ROIII_obs == 0:
                    CHI_ROIII = 0
                elif index[6] == 0 or index[8] == 0:
                    CHI_ROIII = tol_max
                else:
                    CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
                if C3O3_obs == -10:
                    CHI_C3O3 = 0
                elif index[7] == 0 or index[6] == 0:
                    CHI_C3O3 = tol_max
                else:
                    CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
                CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
                if CHI_CO == 0:
                    CO_e = CO_e
                    den_CO_e = den_CO_e
                else:
                    CO_e = (index[1] - CO)**2 / np.exp(CHI_CO) + CO_e
                    den_CO_e = 1 /np.exp(CHI_CO) + den_CO_e
            eCO = CO_e / den_CO_e
        # Calculation of O/H and log U: weighted mean over grid models that
        # are compatible with the C/O derived above.
        if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0 :
            OH = 0
            logU = 0
        else:
            CHI_ROIII = 0
            CHI_C3C4 = 0
            CHI_C34He2 = 0
            CHI_C34 = 0
            CHI_C34Hb = 0
            CHI_OH = 0
            for index in grid:
                # CONSISTENCY FIX: use res_CO instead of the duplicated
                # literal 0.125 (same value; matches the error loop below).
                if CO > -10 and np.abs(index[1] - CO) > np.abs(eCO+res_CO):
                    continue
                if CIV_1549_obs > 0 and index[4] == 0:
                    continue
                if HeII_1640_obs > 0 and index[5] == 0:
                    continue
                else:
                    if ROIII_obs == 0:
                        CHI_ROIII = 0
                    elif index[6] == 0 or index[8] == 0:
                        CHI_ROIII = tol_max
                    else:
                        CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
                    if C34_obs == 0:
                        CHI_C34 = 0
                    elif index[3] == 0 or index[7] == 0:
                        CHI_C34 = tol_max
                    else:
                        CHI_C34 = ((index[7]+index[4])/index[3] - C34_obs)**2/((index[7]+index[4])/index[3])
                    if C34He2_obs == 0:
                        CHI_C34He2 = 0
                    elif index[5] == 0 or index[7] == 0:
                        CHI_C34He2 = tol_max
                    else:
                        CHI_C34He2 = ((index[7]+index[4])/index[5] - C34He2_obs)**2/((index[7]+index[4])/index[5])
                    if C34Hb_obs == 0:
                        CHI_C34Hb = 0
                    elif index[7] == 0:
                        CHI_C34Hb = tol_max
                    else:
                        CHI_C34Hb = (index[7]+index[4] - C34Hb_obs)**2/(index[7]+index[4])
                    if C3C4_obs == 0:
                        CHI_C3C4 = 0
                    elif index[4] == 0 or index[7] == 0:
                        CHI_C3C4 = tol_max
                    else:
                        CHI_C3C4 = (index[7]/index[4] - C3C4_obs)**2/(index[7]/index[4])
                    # Prefer the Hbeta-normalised estimator when available.
                    if C34Hb_obs > 0:
                        CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
                    else:
                        CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
                    if CHI_OH == 0:
                        OH_p = OH_p
                        logU_p = logU_p
                        den_OH = den_OH
                    else:
                        OH_p = index[0] / np.exp(CHI_OH) + OH_p
                        logU_p = index[2] / np.exp(CHI_OH) + logU_p
                        den_OH = 1 /np.exp(CHI_OH) + den_OH
            OH = OH_p / den_OH
            logU = logU_p / den_OH
        # Calculation of error of O/H and logU: weighted variance about the
        # estimates, over the same constrained set of models.
        if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0:
            eOH = 0
            elogU = 0
        else:
            CHI_ROIII = 0
            CHI_C3C4 = 0
            CHI_C34 = 0
            CHI_C34He2 = 0
            CHI_C34Hb = 0
            CHI_OH = 0
            for index in grid:
                if CO > -10 and np.abs(index[1] - CO) > np.abs(eCO+res_CO):
                    continue
                if CIV_1549_obs > 0 and index[4] == 0:
                    continue
                if HeII_1640_obs > 0 and index[5] == 0:
                    continue
                else:
                    if ROIII_obs == 0:
                        CHI_ROIII = 0
                    elif index[6] == 0 or index[8] == 0:
                        CHI_ROIII = tol_max
                    else:
                        CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
                    if C34_obs == 0:
                        CHI_C34 = 0
                    elif index[3] == 0 or index[7] == 0:
                        CHI_C34 = tol_max
                    else:
                        CHI_C34 = ((index[7]+index[4])/index[3] - C34_obs)**2/((index[7]+index[4])/index[3])
                    if C34He2_obs == 0:
                        CHI_C34He2 = 0
                    elif index[5] == 0 or index[7] == 0:
                        CHI_C34He2 = tol_max
                    else:
                        CHI_C34He2 = ((index[7]+index[4])/index[5] - C34He2_obs)**2/((index[7]+index[4])/index[5])
                    if C34Hb_obs == 0:
                        CHI_C34Hb = 0
                    elif index[7] == 0:
                        CHI_C34Hb = tol_max
                    else:
                        CHI_C34Hb = (index[7]+index[4] - C34Hb_obs)**2/(index[7]+index[4])
                    if C3C4_obs == 0:
                        CHI_C3C4 = 0
                    elif index[4] == 0 or index[7] == 0:
                        CHI_C3C4 = tol_max
                    else:
                        CHI_C3C4 = (index[7]/index[4] - C3C4_obs)**2/(index[7]/index[4])
                    if C34Hb_obs > 0:
                        CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
                    else:
                        CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
                    if CHI_OH == 0:
                        OH_e = OH_e
                        logU_e = logU_e
                        den_OH_e = den_OH_e
                    else:
                        OH_e = (index[0] - OH)**2 /np.exp(CHI_OH) + OH_e
                        logU_e = (index[2] - logU)**2 /np.exp(CHI_OH) + logU_e
                        den_OH_e = 1 /np.exp(CHI_OH) + den_OH_e
            eOH = OH_e / den_OH_e
            elogU = logU_e / den_OH_e
        # Iterations for interpolated models: refine the solution on a finer
        # grid interpolated around the first-pass estimates.
        if inter == 0 or (OH == 0 and CO == -10):
            COf = CO
            OHf = OH
            logUf = logU
        elif inter == 1:
            if OH == 0:
                igrid = grid
            else:
                # Interpolate in log(U), then O/H, then (if constrained) C/O.
                igrid = interpolate(grid,2,logU-elogU-0.25,logU+elogU+0.25,10)
                igrid = igrid[np.lexsort((igrid[:,1],igrid[:,2]))]
                igrid = interpolate(igrid,0,OH-eOH-0.1,OH+eOH+0.1,10)
                if CO == -10:
                    igrid = igrid
                else:
                    igrid = igrid[np.lexsort((igrid[:,0],igrid[:,2]))]
                    igrid = interpolate(igrid,1,CO-eCO-0.125,CO+eCO+0.125,10)
            CHI_ROIII = 0
            CHI_C3O3 = 0
            CHI_C3C4 = 0
            CHI_C34He2 = 0
            CHI_C34 = 0
            CHI_C34Hb = 0
            CHI_OH = 0
            CHI_CO = 0
            for index in igrid:
                if ROIII_obs == 0:
                    CHI_ROIII = 0
                elif index[6] == 0 or index[8] == 0:
                    CHI_ROIII = tol_max
                else:
                    CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
                if C3O3_obs == -10:
                    CHI_C3O3 = 0
                elif index[7] == 0 or index[6] == 0:
                    CHI_C3O3 = tol_max
                else:
                    CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
                # NOTE(review): from here on the interpolated grid is indexed
                # with different columns than the raw grid above
                # ((index[6]+index[7]) vs (index[7]+index[4])) — presumably
                # the interpolated table has a different layout; confirm.
                if C34_obs == 0:
                    CHI_C34 = 0
                elif index[4] == 0:
                    CHI_C34 = tol_max
                else:
                    CHI_C34 = ((index[6]+index[7])/index[3] - C34_obs)**2/((index[6]+index[7])/index[3])
                if C34Hb_obs == 0:
                    CHI_C34Hb = 0
                elif index[4] == 0:
                    CHI_C34Hb = tol_max
                else:
                    # NOTE(review): compares against C34_obs although the
                    # analogous non-interpolated branch uses C34Hb_obs —
                    # looks like a possible bug; left unchanged pending
                    # confirmation against the reference implementation.
                    CHI_C34Hb = (index[6]+index[7] - C34_obs)**2/(index[6]+index[7])
                if C3C4_obs == 0:
                    CHI_C3C4 = 0
                elif index[7] == 0 or index[6] == 0:
                    CHI_C3C4 = tol_max
                else:
                    CHI_C3C4 = (index[6]/index[7] - C3C4_obs)**2/(index[6]/index[7])
                if C34Hb_obs > 0:
                    CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
                else:
                    CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
                if CHI_OH == 0:
                    OH_p = OH_p
                    logU_p = logU_p
                    den_OH = den_OH
                else:
                    OH_p = index[0] /np.exp(CHI_OH) + OH_p
                    logU_p = index[2] /np.exp(CHI_OH) + logU_p
                    den_OH = 1 /np.exp(CHI_OH) + den_OH
                CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
                if CHI_CO == 0:
                    CO_p = CO_p
                    den_CO = den_CO
                else:
                    # NOTE(review): weights here use exp(CHI)**2 while the
                    # first pass uses exp(CHI) — confirm this is intended.
                    CO_p = index[1] /np.exp(CHI_CO)**2 + CO_p
                    den_CO = 1 /np.exp(CHI_CO)**2 + den_CO
            if CO == -10:
                COf = -10
            else:
                COf = CO_p / den_CO
            if OH == 0:
                OHf = 0
                logUf = 0
            else:
                OHf = OH_p / den_OH
                logUf = logU_p / den_OH
        # Store this realisation's estimates.
        OH_mc.append(OHf)
        CO_mc.append(COf)
        logU_mc.append(logUf)
        OHe_mc.append(eOH)
        COe_mc.append(eCO)
        logUe_mc.append(elogU)
    # Combine the realisations: mean value, and an error adding in
    # quadrature the Monte Carlo scatter and the mean formal error.
    OHff = np.mean(OH_mc)
    eOHff = (np.std(OH_mc)**2+np.mean(OHe_mc)**2)**0.5
    COff = np.mean(CO_mc)
    eCOff = (np.std(CO_mc)**2+np.mean(COe_mc)**2)**0.5
    logUff = np.mean(logU_mc)
    elogUff = (np.std(logU_mc)**2+np.mean(logUe_mc)**2)**0.5
    OHffs.append(OHff)
    eOHffs.append(eOHff)
    COffs.append(COff)
    eCOffs.append(eCOff)
    logUffs.append(logUff)
    elogUffs.append(elogUff)
    # NOTE(review): skips the progress line for the first row when a single
    # object was apparently duplicated upstream — confirm.
    if input0.size == 1 and tab==0: continue
    print (round(100*(count)/float(input1.size),1),'%',Names[tab],grid_type,'', round(OHff,2), round(eOHff,2),'',round(COff,2), round(eCOff,2), '',round(logUff,2), round(elogUff,2))
# Store the derived quantities in the output table and write it to disk.
output['grid'] = grids
output['OH'] = OHffs
output['eOH'] = eOHffs
output['CO'] = COffs
output['eCO'] = eCOffs
output['logU'] = logUffs
output['elogU'] = elogUffs
# NOTE(review): when the input held a single object it was apparently
# duplicated upstream; drop the duplicate row before writing — confirm.
if input0.size == 1: output = np.delete(output,obj=1,axis=0)
# Human-readable header describing the run and the output columns.
lineas_header = [' HII-CHI-mistry_UV v.4.11 output file', 'Input file:'+input00,'Iterations for MonteCarlo: '+str(n),'Used models: '+sed_type,'','ID. Lya eLya 1549 e1549 1640 e1640 1665 e1665 1909 e1909 Hbeta eHbeta 5007 e5007 i O/H eO/H C/O eC/O logU elogU']
header = '\n'.join(lineas_header)
# Format: ID as string, 14 flux columns, grid index, 6 derived quantities.
np.savetxt(input00+'_hcm-uv-output.dat',output,fmt=' '.join(['%s']*1+['%.3f']*14+['%i']+['%.2f']*6),header=header)
print ('________________________________')
print ('Results are stored in '+input00+'_hcm-uv-output.dat')
| 30.759021 | 453 | 0.526541 |
b026bd7b3b263fb2129be4259af4f24d57934ce8 | 20,746 | py | Python | Name Generator/names.py | Rakkerrak/Projects | 0a9bc54b7d41e69b444165f60254262a163509a9 | [
"MIT"
] | null | null | null | Name Generator/names.py | Rakkerrak/Projects | 0a9bc54b7d41e69b444165f60254262a163509a9 | [
"MIT"
] | null | null | null | Name Generator/names.py | Rakkerrak/Projects | 0a9bc54b7d41e69b444165f60254262a163509a9 | [
"MIT"
] | null | null | null | feFirst = ['Emma', 'Olivia', 'Ava', 'Isabella', 'Sophia', 'Charlotte', 'Mia', 'Amelia', 'Harper', 'Evelyn', 'Abigail', 'Emily', 'Elizabeth', 'Mila', 'Ella', 'Avery', 'Sofia', 'Camila', 'Aria', 'Scarlett', 'Victoria', 'Madison', 'Luna', 'Grace', 'Chloe', 'Penelope', 'Layla', 'Riley', 'Zoey', 'Nora', 'Lily', 'Eleanor', 'Hannah', 'Lillian', 'Addison', 'Aubrey', 'Ellie', 'Stella', 'Natalie', 'Zoe', 'Leah', 'Hazel', 'Violet', 'Aurora', 'Savannah', 'Audrey', 'Brooklyn', 'Bella', 'Claire', 'Skylar', 'Lucy', 'Paisley', 'Everly', 'Anna', 'Caroline', 'Nova', 'Genesis', 'Emilia', 'Kennedy', 'Samantha', 'Maya', 'Willow', 'Kinsley', 'Naomi', 'Aaliyah', 'Elena', 'Sarah', 'Ariana', 'Allison', 'Gabriella', 'Alice', 'Madelyn', 'Cora', 'Ruby', 'Eva', 'Serenity', 'Autumn', 'Adeline', 'Hailey', 'Gianna', 'Valentina', 'Isla', 'Eliana', 'Quinn', 'Nevaeh', 'Ivy', 'Sadie', 'Piper', 'Lydia', 'Alexa', 'Josephine', 'Emery', 'Julia', 'Delilah', 'Arianna', 'Vivian', 'Kaylee', 'Sophie', 'Brielle', 'Madeline', 'Peyton', 'Rylee', 'Clara', 'Hadley', 'Melanie', 'Mackenzie', 'Reagan', 'Adalynn', 'Liliana', 'Aubree', 'Jade', 'Katherine', 'Isabelle', 'Natalia', 'Raelynn', 'Maria', 'Athena', 'Ximena', 'Arya', 'Leilani', 'Taylor', 'Faith', 'Rose', 'Kylie', 'Alexandra', 'Mary', 'Margaret', 'Lyla', 'Ashley', 'Amaya', 'Eliza', 'Brianna', 'Bailey', 'Andrea', 'Khloe', 'Jasmine', 'Melody', 'Iris', 'Isabel', 'Norah', 'Annabelle', 'Valeria', 'Emerson', 'Adalyn', 'Ryleigh', 'Eden', 'Emersyn', 'Anastasia', 'Kayla', 'Alyssa', 'Juliana', 'Charlie', 'Esther', 'Ariel', 'Cecilia', 'Valerie', 'Alina', 'Molly', 'Reese', 'Aliyah', 'Lilly', 'Parker', 'Finley', 'Morgan', 'Sydney', 'Jordyn', 'Eloise', 'Trinity', 'Daisy', 'Kimberly', 'Lauren', 'Genevieve', 'Sara', 'Arabella', 'Harmony', 'Elise', 'Remi', 'Teagan', 'Alexis', 'London', 'Sloane', 'Laila', 'Lucia', 'Diana', 'Juliette', 'Sienna', 'Elliana', 'Londyn', 'Ayla', 'Callie', 'Gracie', 'Josie', 'Amara', 'Jocelyn', 'Daniela', 'Everleigh', 'Mya', 
'Rachel', 'Summer', 'Alana', 'Brooke', 'Alaina', 'Mckenzie', 'Catherine', 'Amy', 'Presley', 'Journee', 'Rosalie', 'Ember', 'Brynlee', 'Rowan', 'Joanna', 'Paige', 'Rebecca', 'Ana', 'Sawyer', 'Mariah', 'Nicole', 'Brooklynn', 'Payton', 'Marley', 'Fiona', 'Georgia', 'Lila', 'Harley', 'Adelyn', 'Alivia', 'Noelle', 'Gemma', 'Vanessa', 'Journey', 'Makayla', 'Angelina', 'Adaline', 'Catalina', 'Alayna', 'Julianna', 'Leila', 'Lola', 'Adriana', 'June', 'Juliet', 'Jayla', 'River', 'Tessa', 'Lia', 'Dakota', 'Delaney', 'Selena', 'Blakely', 'Ada', 'Camille', 'Zara', 'Malia', 'Hope', 'Samara', 'Vera', 'Mckenna', 'Briella', 'Izabella', 'Hayden', 'Raegan', 'Michelle', 'Angela', 'Ruth', 'Freya', 'Kamila', 'Vivienne', 'Aspen', 'Olive', 'Kendall', 'Elaina', 'Thea', 'Kali', 'Destiny', 'Amiyah', 'Evangeline', 'Cali', 'Blake', 'Elsie', 'Juniper', 'Alexandria', 'Myla', 'Ariella', 'Kate', 'Mariana', 'Lilah', 'Charlee', 'Daleyza', 'Nyla', 'Jane', 'Maggie', 'Zuri', 'Aniyah', 'Lucille', 'Leia', 'Melissa', 'Adelaide', 'Amina', 'Giselle', 'Lena', 'Camilla', 'Miriam', 'Millie', 'Brynn', 'Gabrielle', 'Sage', 'Annie', 'Logan', 'Lilliana', 'Haven', 'Jessica', 'Kaia', 'Magnolia', 'Amira', 'Adelynn', 'Makenzie', 'Stephanie', 'Nina', 'Phoebe', 'Arielle', 'Evie', 'Lyric', 'Alessandra', 'Gabriela', 'Paislee', 'Raelyn', 'Madilyn', 'Paris', 'Makenna', 'Kinley', 'Gracelyn', 'Talia', 'Maeve', 'Rylie', 'Kiara', 'Evelynn', 'Brinley', 'Jacqueline', 'Laura', 'Gracelynn', 'Lexi', 'Ariah', 'Fatima', 'Jennifer', 'Kehlani', 'Alani', 'Ariyah', 'Luciana', 'Allie', 'Heidi', 'Maci', 'Phoenix', 'Felicity', 'Joy', 'Kenzie', 'Veronica', 'Margot', 'Addilyn', 'Lana', 'Cassidy', 'Remington', 'Saylor', 'Ryan', 'Keira', 'Harlow', 'Miranda', 'Angel', 'Amanda', 'Daniella', 'Royalty', 'Gwendolyn', 'Ophelia', 'Heaven', 'Jordan', 'Madeleine', 'Esmeralda', 'Kira', 'Miracle', 'Elle', 'Amari', 'Danielle', 'Daphne', 'Willa', 'Haley', 'Gia', 'Kaitlyn', 'Oakley', 'Kailani', 'Winter', 'Alicia', 'Serena', 'Nadia', 'Aviana', 'Demi', 'Jada', 
'Braelynn', 'Dylan', 'Ainsley', 'Alison', 'Camryn', 'Avianna', 'Bianca', 'Skyler', 'Scarlet', 'Maddison', 'Nylah', 'Sarai', 'Regina', 'Dahlia', 'Nayeli', 'Raven', 'Helen', 'Adrianna', 'Averie', 'Skye', 'Kelsey', 'Tatum', 'Kensley', 'Maliyah', 'Erin', 'Viviana', 'Jenna', 'Anaya', 'Carolina', 'Shelby', 'Sabrina', 'Mikayla', 'Annalise', 'Octavia', 'Lennon', 'Blair', 'Carmen', 'Yaretzi', 'Kennedi', 'Mabel', 'Zariah', 'Kyla', 'Christina', 'Selah', 'Celeste', 'Eve', 'Mckinley', 'Milani', 'Frances', 'Jimena', 'Kylee', 'Leighton', 'Katie', 'Aitana', 'Kayleigh', 'Sierra', 'Kathryn', 'Rosemary', 'Jolene', 'Alondra', 'Elisa', 'Helena', 'Charleigh', 'Hallie', 'Lainey', 'Avah', 'Jazlyn', 'Kamryn', 'Mira', 'Cheyenne', 'Francesca', 'Antonella', 'Wren', 'Chelsea', 'Amber', 'Emory', 'Lorelei', 'Nia', 'Abby', 'April', 'Emelia', 'Carter', 'Aylin', 'Cataleya', 'Bethany', 'Marlee', 'Carly', 'Kaylani', 'Emely', 'Liana', 'Madelynn', 'Cadence', 'Matilda', 'Sylvia', 'Myra', 'Fernanda', 'Oaklyn', 'Elianna', 'Hattie', 'Dayana', 'Kendra', 'Maisie', 'Malaysia', 'Kara', 'Katelyn', 'Maia', 'Celine', 'Cameron', 'Renata', 'Jayleen', 'Charli', 'Emmalyn', 'Holly', 'Azalea', 'Leona', 'Alejandra', 'Bristol', 'Collins', 'Imani', 'Meadow', 'Alexia', 'Edith', 'Kaydence', 'Leslie', 'Lilith', 'Kora', 'Aisha', 'Meredith', 'Danna', 'Wynter', 'Emberly', 'Julieta', 'Michaela', 'Alayah', 'Jemma', 'Reign', 'Colette', 'Kaliyah', 'Elliott', 'Johanna', 'Remy', 'Sutton', 'Emmy', 'Virginia', 'Briana', 'Oaklynn', 'Adelina', 'Everlee', 'Megan', 'Angelica', 'Justice', 'Mariam', 'Khaleesi', 'Macie', 'Karsyn', 'Alanna', 'Aleah', 'Mae', 'Mallory', 'Esme', 'Skyla', 'Madilynn', 'Charley', 'Allyson', 'Hanna', 'Shiloh', 'Henley', 'Macy', 'Maryam', 'Ivanna', 'Ashlynn', 'Lorelai', 'Amora', 'Ashlyn', 'Sasha', 'Baylee', 'Beatrice', 'Itzel', 'Priscilla', 'Marie', 'Jayda', 'Liberty', 'Rory', 'Alessia', 'Alaia', 'Janelle', 'Kalani', 'Gloria', 'Sloan', 'Dorothy', 'Greta', 'Julie', 'Zahra', 'Savanna', 'Annabella', 'Poppy', 'Amalia', 
'Zaylee', 'Cecelia', 'Coraline', 'Kimber', 'Emmie', 'Anne', 'Karina', 'Kassidy', 'Kynlee', 'Monroe', 'Anahi', 'Jaliyah', 'Jazmin', 'Maren', 'Monica', 'Siena', 'Marilyn', 'Reyna', 'Kyra', 'Lilian', 'Jamie', 'Melany', 'Alaya', 'Ariya', 'Kelly', 'Rosie', 'Adley', 'Dream', 'Jaylah', 'Laurel', 'Jazmine', 'Mina', 'Karla', 'Bailee', 'Aubrie', 'Katalina', 'Melina', 'Harlee', 'Elliot', 'Hayley', 'Elaine', 'Karen', 'Dallas', 'Irene', 'Lylah', 'Ivory', 'Chaya', 'Rosa', 'Aleena', 'Braelyn', 'Nola', 'Alma', 'Leyla', 'Pearl', 'Addyson', 'Roselyn', 'Lacey', 'Lennox', 'Reina', 'Aurelia', 'Noa', 'Janiyah', 'Jessie', 'Madisyn', 'Saige', 'Alia', 'Tiana', 'Astrid', 'Cassandra', 'Kyleigh', 'Romina', 'Stevie', 'Haylee', 'Zelda', 'Lillie', 'Aileen', 'Brylee', 'Eileen', 'Yara', 'Ensley', 'Lauryn', 'Giuliana', 'Livia', 'Anya', 'Mikaela', 'Palmer', 'Lyra', 'Mara', 'Marina', 'Kailey', 'Liv', 'Clementine', 'Kenna', 'Briar', 'Emerie', 'Galilea', 'Tiffany', 'Bonnie', 'Elyse', 'Cynthia', 'Frida', 'Kinslee', 'Tatiana', 'Joelle', 'Armani', 'Jolie', 'Nalani', 'Rayna', 'Yareli', 'Meghan', 'Rebekah', 'Addilynn', 'Faye', 'Zariyah', 'Lea', 'Aliza', 'Julissa', 'Lilyana', 'Anika', 'Kairi', 'Aniya', 'Noemi', 'Angie', 'Crystal', 'Bridget', 'Ari', 'Davina', 'Amelie', 'Amirah', 'Annika', 'Elora', 'Xiomara', 'Linda', 'Hana', 'Laney', 'Mercy', 'Hadassah', 'Madalyn', 'Louisa', 'Simone', 'Kori', 'Jillian', 'Alena', 'Malaya', 'Miley', 'Milan', 'Sariyah', 'Malani', 'Clarissa', 'Nala', 'Princess', 'Amani', 'Analia', 'Estella', 'Milana', 'Aya', 'Chana', 'Jayde', 'Tenley', 'Zaria', 'Itzayana', 'Penny', 'Ailani', 'Lara', 'Aubriella', 'Clare', 'Lina', 'Rhea', 'Bria', 'Thalia', 'Keyla', 'Haisley', 'Ryann', 'Addisyn', 'Amaia', 'Chanel', 'Ellen', 'Harmoni', 'Aliana', 'Tinsley', 'Landry', 'Paisleigh', 'Lexie', 'Myah', 'Rylan', 'Deborah', 'Emilee', 'Laylah', 'Novalee', 'Ellis', 'Emmeline', 'Avalynn', 'Hadlee', 'Legacy', 'Braylee', 'Elisabeth', 'Kaylie', 'Ansley', 'Dior', 'Paula', 'Belen', 'Corinne', 'Maleah', 'Martha', 
'Teresa', 'Salma', 'Louise', 'Averi', 'Lilianna', 'Amiya', 'Milena', 'Royal', 'Aubrielle', 'Calliope', 'Frankie', 'Natasha', 'Kamilah', 'Meilani', 'Raina', 'Amayah', 'Lailah', 'Rayne', 'Zaniyah', 'Isabela', 'Nathalie', 'Miah', 'Opal', 'Kenia', 'Azariah', 'Hunter', 'Tori', 'Andi', 'Keily', 'Leanna', 'Scarlette', 'Jaelyn', 'Saoirse', 'Selene', 'Dalary', 'Lindsey', 'Marianna', 'Ramona', 'Estelle', 'Giovanna', 'Holland', 'Nancy', 'Emmalynn', 'Mylah', 'Rosalee', 'Sariah', 'Zoie', 'Blaire', 'Lyanna', 'Maxine', 'Anais', 'Dana', 'Judith', 'Kiera', 'Jaelynn', 'Noor', 'Kai', 'Adalee', 'Oaklee', 'Amaris', 'Jaycee', 'Belle', 'Carolyn', 'Della', 'Karter', 'Sky', 'Treasure', 'Vienna', 'Jewel', 'Rivka', 'Rosalyn', 'Alannah', 'Ellianna', 'Sunny', 'Claudia', 'Cara', 'Hailee', 'Estrella', 'Harleigh', 'Zhavia', 'Alianna', 'Brittany', 'Jaylene', 'Journi', 'Marissa', 'Mavis', 'Iliana', 'Jurnee', 'Aislinn', 'Alyson', 'Elsa', 'Kamiyah', 'Kiana', 'Lisa', 'Arlette', 'Kadence', 'Kathleen', 'Halle', 'Erika', 'Sylvie', 'Adele', 'Erica', 'Veda', 'Whitney', 'Bexley', 'Emmaline', 'Guadalupe', 'August', 'Brynleigh', 'Gwen', 'Promise', 'Alisson', 'India', 'Madalynn', 'Paloma', 'Patricia', 'Samira', 'Aliya', 'Casey', 'Jazlynn', 'Paulina', 'Dulce', 'Kallie', 'Perla', 'Adrienne', 'Alora', 'Nataly', 'Ayleen', 'Christine', 'Kaiya', 'Ariadne', 'Karlee', 'Barbara', 'Lillianna', 'Raquel', 'Saniyah', 'Yamileth', 'Arely', 'Celia', 'Heavenly', 'Kaylin', 'Marisol', 'Marleigh', 'Avalyn', 'Berkley', 'Kataleya', 'Zainab', 'Dani', 'Egypt', 'Joyce', 'Kenley', 'Annabel', 'Kaelyn', 'Etta', 'Hadleigh', 'Joselyn', 'Luella', 'Jaylee', 'Zola', 'Alisha', 'Ezra', 'Queen', 'Amia', 'Annalee', 'Bellamy', 'Paola', 'Tinley', 'Violeta', 'Jenesis', 'Arden', 'Giana', 'Wendy', 'Ellison', 'Florence', 'Margo', 'Naya', 'Robin', 'Sandra', 'Scout', 'Waverly', 'Janessa', 'Jayden', 'Micah', 'Novah', 'Zora', 'Ann', 'Jana', 'Taliyah', 'Vada', 'Giavanna', 'Ingrid', 'Valery', 'Azaria', 'Emmarie', 'Esperanza', 'Kailyn', 'Aiyana', 'Keilani', 
'Austyn', 'Whitley', 'Elina', 'Kimora', 'Maliah']
maFirst = ['Liam', 'Noah', 'William', 'James', 'Oliver', 'Benjamin', 'Elijah', 'Lucas', 'Mason', 'Logan', 'Alexander', 'Ethan', 'Jacob', 'Michael', 'Daniel', 'Henry', 'Jackson', 'Sebastian', 'Aiden', 'Matthew', 'Samuel', 'David', 'Joseph', 'Carter', 'Owen', 'Wyatt', 'John', 'Jack', 'Luke', 'Jayden', 'Dylan', 'Grayson', 'Levi', 'Isaac', 'Gabriel', 'Julian', 'Mateo', 'Anthony', 'Jaxon', 'Lincoln', 'Joshua', 'Christopher', 'Andrew', 'Theodore', 'Caleb', 'Ryan', 'Asher', 'Nathan', 'Thomas', 'Leo', 'Isaiah', 'Charles', 'Josiah', 'Hudson', 'Christian', 'Hunter', 'Connor', 'Eli', 'Ezra', 'Aaron', 'Landon', 'Adrian', 'Jonathan', 'Nolan', 'Jeremiah', 'Easton', 'Elias', 'Colton', 'Cameron', 'Carson', 'Robert', 'Angel', 'Maverick', 'Nicholas', 'Dominic', 'Jaxson', 'Greyson', 'Adam', 'Ian', 'Austin', 'Santiago', 'Jordan', 'Cooper', 'Brayden', 'Roman', 'Evan', 'Ezekiel', 'Xavier', 'Jose', 'Jace', 'Jameson', 'Leonardo', 'Bryson', 'Axel', 'Everett', 'Parker', 'Kayden', 'Miles', 'Sawyer', 'Jason', 'Declan', 'Weston', 'Micah', 'Ayden', 'Wesley', 'Luca', 'Vincent', 'Damian', 'Zachary', 'Silas', 'Gavin', 'Chase', 'Kai', 'Emmett', 'Harrison', 'Nathaniel', 'Kingston', 'Cole', 'Tyler', 'Bennett', 'Bentley', 'Ryker', 'Tristan', 'Brandon', 'Kevin', 'Luis', 'George', 'Ashton', 'Rowan', 'Braxton', 'Ryder', 'Gael', 'Ivan', 'Diego', 'Maxwell', 'Max', 'Carlos', 'Kaiden', 'Juan', 'Maddox', 'Justin', 'Waylon', 'Calvin', 'Giovanni', 'Jonah', 'Abel', 'Jayce', 'Jesus', 'Amir', 'King', 'Beau', 'Camden', 'Alex', 'Jasper', 'Malachi', 'Brody', 'Jude', 'Blake', 'Emmanuel', 'Eric', 'Brooks', 'Elliot', 'Antonio', 'Abraham', 'Timothy', 'Finn', 'Rhett', 'Elliott', 'Edward', 'August', 'Xander', 'Alan', 'Dean', 'Lorenzo', 'Bryce', 'Karter', 'Victor', 'Milo', 'Miguel', 'Hayden', 'Graham', 'Grant', 'Zion', 'Tucker', 'Jesse', 'Zayden', 'Joel', 'Richard', 'Patrick', 'Emiliano', 'Avery', 'Nicolas', 'Brantley', 'Dawson', 'Myles', 'Matteo', 'River', 'Steven', 'Thiago', 'Zane', 'Matias', 'Judah', 'Messiah', 'Jeremy', 
'Preston', 'Oscar', 'Kaleb', 'Alejandro', 'Marcus', 'Mark', 'Peter', 'Maximus', 'Barrett', 'Jax', 'Andres', 'Holden', 'Legend', 'Charlie', 'Knox', 'Kaden', 'Paxton', 'Kyrie', 'Kyle', 'Griffin', 'Josue', 'Kenneth', 'Beckett', 'Enzo', 'Adriel', 'Arthur', 'Felix', 'Bryan', 'Lukas', 'Paul', 'Brian', 'Colt', 'Caden', 'Leon', 'Archer', 'Omar', 'Israel', 'Aidan', 'Theo', 'Javier', 'Remington', 'Jaden', 'Bradley', 'Emilio', 'Colin', 'Riley', 'Cayden', 'Phoenix', 'Clayton', 'Simon', 'Ace', 'Nash', 'Derek', 'Rafael', 'Zander', 'Brady', 'Jorge', 'Jake', 'Louis', 'Damien', 'Karson', 'Walker', 'Maximiliano', 'Amari', 'Sean', 'Chance', 'Walter', 'Martin', 'Finley', 'Andre', 'Tobias', 'Cash', 'Corbin', 'Arlo', 'Iker', 'Erick', 'Emerson', 'Gunner', 'Cody', 'Stephen', 'Francisco', 'Killian', 'Dallas', 'Reid', 'Manuel', 'Lane', 'Atlas', 'Rylan', 'Jensen', 'Ronan', 'Beckham', 'Daxton', 'Anderson', 'Kameron', 'Raymond', 'Orion', 'Cristian', 'Tanner', 'Kyler', 'Jett', 'Cohen', 'Ricardo', 'Spencer', 'Gideon', 'Ali', 'Fernando', 'Jaiden', 'Titus', 'Travis', 'Bodhi', 'Eduardo', 'Dante', 'Ellis', 'Prince', 'Kane', 'Luka', 'Kash', 'Hendrix', 'Desmond', 'Donovan', 'Mario', 'Atticus', 'Cruz', 'Garrett', 'Hector', 'Angelo', 'Jeffrey', 'Edwin', 'Cesar', 'Zayn', 'Devin', 'Conor', 'Warren', 'Odin', 'Jayceon', 'Romeo', 'Julius', 'Jaylen', 'Hayes', 'Kayson', 'Muhammad', 'Jaxton', 'Joaquin', 'Caiden', 'Dakota', 'Major', 'Keegan', 'Sergio', 'Marshall', 'Johnny', 'Kade', 'Edgar', 'Leonel', 'Ismael', 'Marco', 'Tyson', 'Wade', 'Collin', 'Troy', 'Nasir', 'Conner', 'Adonis', 'Jared', 'Rory', 'Andy', 'Jase', 'Lennox', 'Shane', 'Malik', 'Ari', 'Reed', 'Seth', 'Clark', 'Erik', 'Lawson', 'Trevor', 'Gage', 'Nico', 'Malakai', 'Quinn', 'Cade', 'Johnathan', 'Sullivan', 'Solomon', 'Cyrus', 'Fabian', 'Pedro', 'Frank', 'Shawn', 'Malcolm', 'Khalil', 'Nehemiah', 'Dalton', 'Mathias', 'Jay', 'Ibrahim', 'Peyton', 'Winston', 'Kason', 'Zayne', 'Noel', 'Princeton', 'Matthias', 'Gregory', 'Sterling', 'Dominick', 'Elian', 
'Grady', 'Russell', 'Finnegan', 'Ruben', 'Gianni', 'Porter', 'Kendrick', 'Leland', 'Pablo', 'Allen', 'Hugo', 'Raiden', 'Kolton', 'Remy', 'Ezequiel', 'Damon', 'Emanuel', 'Zaiden', 'Otto', 'Bowen', 'Marcos', 'Abram', 'Kasen', 'Franklin', 'Royce', 'Jonas', 'Sage', 'Philip', 'Esteban', 'Drake', 'Kashton', 'Roberto', 'Harvey', 'Alexis', 'Kian', 'Jamison', 'Maximilian', 'Adan', 'Milan', 'Phillip', 'Albert', 'Dax', 'Mohamed', 'Ronin', 'Kamden', 'Hank', 'Memphis', 'Oakley', 'Augustus', 'Drew', 'Moises', 'Armani', 'Rhys', 'Benson', 'Jayson', 'Kyson', 'Braylen', 'Corey', 'Gunnar', 'Omari', 'Alonzo', 'Landen', 'Armando', 'Derrick', 'Dexter', 'Enrique', 'Bruce', 'Nikolai', 'Francis', 'Rocco', 'Kairo', 'Royal', 'Zachariah', 'Arjun', 'Deacon', 'Skyler', 'Eden', 'Alijah', 'Rowen', 'Pierce', 'Uriel', 'Ronald', 'Luciano', 'Tate', 'Frederick', 'Kieran', 'Lawrence', 'Moses', 'Rodrigo', 'Brycen', 'Leonidas', 'Nixon', 'Keith', 'Chandler', 'Case', 'Davis', 'Asa', 'Darius', 'Isaias', 'Aden', 'Jaime', 'Landyn', 'Raul', 'Niko', 'Trenton', 'Apollo', 'Cairo', 'Izaiah', 'Scott', 'Dorian', 'Julio', 'Wilder', 'Santino', 'Dustin', 'Donald', 'Raphael', 'Saul', 'Taylor', 'Ayaan', 'Duke', 'Ryland', 'Tatum', 'Ahmed', 'Moshe', 'Edison', 'Emmitt', 'Cannon', 'Alec', 'Danny', 'Keaton', 'Roy', 'Conrad', 'Roland', 'Quentin', 'Lewis', 'Samson', 'Brock', 'Kylan', 'Cason', 'Ahmad', 'Jalen', 'Nikolas', 'Braylon', 'Kamari', 'Dennis', 'Callum', 'Justice', 'Soren', 'Rayan', 'Aarav', 'Gerardo', 'Ares', 'Brendan', 'Jamari', 'Kaison', 'Yusuf', 'Issac', 'Jasiah', 'Callen', 'Forrest', 'Makai', 'Crew', 'Kobe', 'Bo', 'Julien', 'Mathew', 'Braden', 'Johan', 'Marvin', 'Zaid', 'Stetson', 'Casey', 'Ty', 'Ariel', 'Tony', 'Zain', 'Callan', 'Cullen', 'Sincere', 'Uriah', 'Dillon', 'Kannon', 'Colby', 'Axton', 'Cassius', 'Quinton', 'Mekhi', 'Reece', 'Alessandro', 'Jerry', 'Mauricio', 'Sam', 'Trey', 'Mohammad', 'Alberto', 'Gustavo', 'Arturo', 'Fletcher', 'Marcelo', 'Abdiel', 'Hamza', 'Alfredo', 'Chris', 'Finnley', 'Curtis', 
'Kellan', 'Quincy', 'Kase', 'Harry', 'Kyree', 'Wilson', 'Cayson', 'Hezekiah', 'Kohen', 'Neil', 'Mohammed', 'Raylan', 'Kaysen', 'Lucca', 'Sylas', 'Mack', 'Leonard', 'Lionel', 'Ford', 'Roger', 'Rex', 'Alden', 'Boston', 'Colson', 'Briggs', 'Zeke', 'Dariel', 'Kingsley', 'Valentino', 'Jamir', 'Salvador', 'Vihaan', 'Mitchell', 'Lance', 'Lucian', 'Darren', 'Jimmy', 'Alvin', 'Amos', 'Tripp', 'Zaire', 'Layton', 'Reese', 'Casen', 'Colten', 'Brennan', 'Korbin', 'Sonny', 'Bruno', 'Orlando', 'Devon', 'Huxley', 'Boone', 'Maurice', 'Nelson', 'Douglas', 'Randy', 'Gary', 'Lennon', 'Titan', 'Denver', 'Jaziel', 'Noe', 'Jefferson', 'Ricky', 'Lochlan', 'Rayden', 'Bryant', 'Langston', 'Lachlan', 'Clay', 'Abdullah', 'Lee', 'Baylor', 'Leandro', 'Ben', 'Kareem', 'Layne', 'Joe', 'Crosby', 'Deandre', 'Demetrius', 'Kellen', 'Carl', 'Jakob', 'Ridge', 'Bronson', 'Jedidiah', 'Rohan', 'Larry', 'Stanley', 'Tomas', 'Shiloh', 'Thaddeus', 'Watson', 'Baker', 'Vicente', 'Koda', 'Jagger', 'Nathanael', 'Carmelo', 'Shepherd', 'Graysen', 'Melvin', 'Ernesto', 'Jamie', 'Yosef', 'Clyde', 'Eddie', 'Tristen', 'Grey', 'Ray', 'Tommy', 'Samir', 'Ramon', 'Santana', 'Kristian', 'Marcel', 'Wells', 'Zyaire', 'Brecken', 'Byron', 'Otis', 'Reyansh', 'Axl', 'Joey', 'Trace', 'Morgan', 'Musa', 'Harlan', 'Enoch', 'Henrik', 'Kristopher', 'Talon', 'Rey', 'Guillermo', 'Houston', 'Jon', 'Vincenzo', 'Dane', 'Terry', 'Azariah', 'Castiel', 'Kye', 'Augustine', 'Zechariah', 'Joziah', 'Kamryn', 'Hassan', 'Jamal', 'Chaim', 'Bodie', 'Emery', 'Branson', 'Jaxtyn', 'Kole', 'Wayne', 'Aryan', 'Alonso', 'Brixton', 'Madden', 'Allan', 'Flynn', 'Jaxen', 'Harley', 'Magnus', 'Sutton', 'Dash', 'Anders', 'Westley', 'Brett', 'Emory', 'Felipe', 'Yousef', 'Jadiel', 'Mordechai', 'Dominik', 'Junior', 'Eliseo', 'Fisher', 'Harold', 'Jaxxon', 'Kamdyn', 'Maximo', 'Caspian', 'Kelvin', 'Damari', 'Fox', 'Trent', 'Hugh', 'Briar', 'Franco', 'Keanu', 'Terrance', 'Yahir', 'Ameer', 'Kaiser', 'Thatcher', 'Ishaan', 'Koa', 'Merrick', 'Coen', 'Rodney', 'Brayan', 
'London', 'Rudy', 'Gordon', 'Bobby', 'Aron', 'Marc', 'Van', 'Anakin', 'Canaan', 'Dario', 'Reginald', 'Westin', 'Darian', 'Ledger', 'Leighton', 'Maxton', 'Tadeo', 'Valentin', 'Aldo', 'Khalid', 'Nickolas', 'Toby', 'Dayton', 'Jacoby', 'Billy', 'Gatlin', 'Elisha', 'Jabari', 'Jermaine', 'Alvaro', 'Marlon', 'Mayson', 'Blaze', 'Jeffery', 'Kace', 'Braydon', 'Achilles', 'Brysen', 'Saint', 'Xzavier', 'Aydin', 'Eugene', 'Adrien', 'Cain', 'Kylo', 'Nova', 'Onyx', 'Arian', 'Bjorn', 'Jerome', 'Miller', 'Alfred', 'Kenzo', 'Kyng', 'Leroy', 'Maison', 'Jordy', 'Stefan', 'Wallace', 'Benicio', 'Kendall', 'Zayd', 'Blaine', 'Tristian', 'Anson', 'Gannon', 'Jeremias', 'Marley', 'Ronnie', 'Dangelo', 'Kody', 'Will', 'Bentlee', 'Gerald', 'Salvatore', 'Turner', 'Chad', 'Misael', 'Mustafa', 'Konnor', 'Maxim', 'Rogelio', 'Zakai', 'Cory', 'Judson', 'Brentley', 'Darwin', 'Louie', 'Ulises', 'Dakari', 'Rocky', 'Wesson', 'Alfonso', 'Payton', 'Dwayne', 'Juelz', 'Duncan', 'Keagan', 'Deshawn', 'Bode', 'Bridger', 'Skylar', 'Brodie', 'Landry', 'Avi', 'Keenan', 'Reuben', 'Jaxx', 'Rene', 'Yehuda', 'Imran', 'Yael', 'Alexzander', 'Willie', 'Cristiano', 'Heath', 'Lyric', 'Davion', 'Elon', 'Karsyn', 'Krew', 'Jairo', 'Maddux', 'Ephraim', 'Ignacio', 'Vivaan', 'Aries', 'Vance', 'Boden', 'Lyle', 'Ralph', 'Reign', 'Camilo', 'Draven', 'Terrence', 'Idris', 'Ira', 'Javion', 'Jericho', 'Khari', 'Marcellus', 'Creed', 'Shepard', 'Terrell', 'Ahmir', 'Camdyn', 'Cedric', 'Howard', 'Jad', 'Zahir', 'Harper', 'Justus', 'Forest', 'Gibson', 'Zev', 'Alaric', 'Decker', 'Ernest', 'Jesiah', 'Torin', 'Benedict', 'Bowie', 'Deangelo', 'Genesis', 'Harlem', 'Kalel', 'Kylen', 'Bishop', 'Immanuel', 'Lian', 'Zavier', 'Archie', 'Davian', 'Gus', 'Kabir', 'Korbyn', 'Randall', 'Benton', 'Coleman', 'Markus']
last = ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Garcia', 'Miller', 'Davis', 'Rodriguez', 'Martinez', 'Hernandez', 'Lopez', 'Gonzales', 'Wilson', 'Anderson', 'Thomas', 'Taylor', 'Moore', 'Jackson', 'Martin', 'Lee', 'Perez', 'Thompson', 'White', 'Harris', 'Sanchez', 'Clark', 'Ramirez', 'Lewis', 'Robinson', 'Walker', 'Young', 'Allen', 'King', 'Wright', 'Scott', 'Torres', 'Nguyen', 'Hill', 'Flores', 'Green', 'Adams', 'Nelson', 'Baker', 'Hall', 'Rivera', 'Campbell', 'Mitchell', 'Carter', 'Roberts', 'Gomez', 'Phillips', 'Evans', 'Turner', 'Diaz', 'Parker', 'Cruz', 'Edwards', 'Collins', 'Reyes', 'Stewart', 'Morris', 'Morales', 'Murphy', 'Cook', 'Rogers', 'Gutierrez', 'Ortiz', 'Morgan', 'Cooper', 'Peterson', 'Bailey', 'Reed', 'Kelly', 'Howard', 'Ramos', 'Kim', 'Cox', 'Ward', 'Richardson', 'Watson', 'Brooks', 'Chavez', 'Wood', 'James', 'Bennet', 'Gray', 'Mendoza', 'Ruiz', 'Hughes', 'Price', 'Alvarez', 'Castillo', 'Sanders', 'Patel', 'Myers', 'Long', 'Ross', 'Foster', 'Jimenez']
| 3,457.666667 | 10,010 | 0.594428 |
b028018661b0929da5b6a926d65bb750a50efe57 | 444 | py | Python | oldtoronto/test/toronto_archives_test.py | patcon/oldto | 44c099550a4e3cfafa85afbaebd3cd6c33325891 | [
"Apache-2.0"
] | 22 | 2018-04-25T22:03:53.000Z | 2021-07-13T18:43:23.000Z | oldtoronto/test/toronto_archives_test.py | patcon/oldto | 44c099550a4e3cfafa85afbaebd3cd6c33325891 | [
"Apache-2.0"
] | 17 | 2018-04-30T14:04:08.000Z | 2022-02-13T19:52:44.000Z | oldtoronto/test/toronto_archives_test.py | patcon/oldto | 44c099550a4e3cfafa85afbaebd3cd6c33325891 | [
"Apache-2.0"
] | 7 | 2018-05-08T23:32:44.000Z | 2022-01-27T17:49:30.000Z | from nose.tools import eq_
from oldtoronto.toronto_archives import get_citation_hierarchy # noqa
| 26.117647 | 73 | 0.646396 |
b02d1a840f2e9ca574098b991b8f37e1b954c866 | 979 | py | Python | excel2.py | darkless456/Python | 1ba37d028e4a818ccfffc18682c1bac15554e3ac | [
"MIT"
] | null | null | null | excel2.py | darkless456/Python | 1ba37d028e4a818ccfffc18682c1bac15554e3ac | [
"MIT"
] | null | null | null | excel2.py | darkless456/Python | 1ba37d028e4a818ccfffc18682c1bac15554e3ac | [
"MIT"
] | null | null | null | # excel2.py
import xlrd
if __name__ == '__main__':
print_xls('D:\\python_path\\sample_ex.xls')
'''
__name__ __name__ import
__name__ ,
__name__ "__main__"
cmd .py,__name__'__main__';
import .py,__name__'__main__';
if __name__ == '__main__'.py
'''
| 27.194444 | 96 | 0.670072 |
b02f9eadae5afd900218c21f9e3251e4c4f3cf07 | 1,162 | py | Python | reth_buffer/reth_buffer/__init__.py | sosp2021/Reth | 10c032f44a25049355ebdd97a2cb3299e8c3fb82 | [
"MIT"
] | null | null | null | reth_buffer/reth_buffer/__init__.py | sosp2021/Reth | 10c032f44a25049355ebdd97a2cb3299e8c3fb82 | [
"MIT"
] | 1 | 2021-08-10T02:58:58.000Z | 2021-08-10T02:58:58.000Z | reth_buffer/reth_buffer/__init__.py | sosp2021/reth | 10c032f44a25049355ebdd97a2cb3299e8c3fb82 | [
"MIT"
] | null | null | null | import multiprocessing as mp
import portpicker
from .client import Client, NumpyLoader, TorchCudaLoader
from .sampler import PERSampler
from .server.main_loop import main_loop
from .utils import get_local_ip
| 23.24 | 81 | 0.645439 |
b02fad481b4d3cb3263f98acf09c40e1f2669bfa | 7,171 | py | Python | agent.py | FlowerForAlgernon/rainbow | 78492ba572e2f8b4b2228d2ca625af94a09ee696 | [
"Apache-2.0"
] | 1 | 2022-03-23T02:02:10.000Z | 2022-03-23T02:02:10.000Z | agent.py | FlowerForAlgernon/rainbow | 78492ba572e2f8b4b2228d2ca625af94a09ee696 | [
"Apache-2.0"
] | null | null | null | agent.py | FlowerForAlgernon/rainbow | 78492ba572e2f8b4b2228d2ca625af94a09ee696 | [
"Apache-2.0"
] | null | null | null | import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from memory import Transition, ReplayMemory, PrioritizedReplayMemory, NStepMemory
from DQN import DQN, DuelingDQN, NoisyDQN, DistributionalDQN
| 48.452703 | 142 | 0.647748 |
b0347f10c5746915500b0d6e172c2c32ab5316d0 | 121 | py | Python | Deutsch-Jozsa-Algorithm/main.py | Gregory-Eales/QA-Reimplementations | bef0b3e67397a73c468e539c426c6629d398433b | [
"MIT"
] | 1 | 2019-05-03T21:48:29.000Z | 2019-05-03T21:48:29.000Z | Deutsch-Jozsa-Algorithm/main.py | Gregory-Eales/QA-Reimplementations | bef0b3e67397a73c468e539c426c6629d398433b | [
"MIT"
] | null | null | null | Deutsch-Jozsa-Algorithm/main.py | Gregory-Eales/QA-Reimplementations | bef0b3e67397a73c468e539c426c6629d398433b | [
"MIT"
] | null | null | null | import qsharp
from DeutschJozsa import SayHello, RunDeutschJozsa
SayHello.simulate()
RunDeutschJozsa.simulate(N=10)
| 13.444444 | 50 | 0.818182 |
b0370f00352f25c209bf62c39330309ded5b5b35 | 413 | py | Python | xslt/apply.py | carlosduarteroa/smap | 5760631dfaf3e85da26ce68bf542bf254bb92c80 | [
"BSD-2-Clause"
] | 21 | 2015-02-06T21:55:59.000Z | 2021-04-29T11:23:18.000Z | xslt/apply.py | carlosduarteroa/smap | 5760631dfaf3e85da26ce68bf542bf254bb92c80 | [
"BSD-2-Clause"
] | 9 | 2015-02-03T10:41:35.000Z | 2020-02-18T12:46:10.000Z | xslt/apply.py | carlosduarteroa/smap | 5760631dfaf3e85da26ce68bf542bf254bb92c80 | [
"BSD-2-Clause"
] | 20 | 2015-02-06T00:09:19.000Z | 2020-01-10T13:27:06.000Z | """Apply a stylesheet to an XML file"""
import sys
from lxml import etree
if len(sys.argv) != 3:
print >>sys.stderr, "Usage: %s <stylesheet> <xml doc> ..." % sys.argv[0]
sys.exit(1)
transform = etree.XSLT(etree.XML(open(sys.argv[1], "r").read()))
for xmlfile in sys.argv[2:]:
with open(xmlfile, "r") as fp:
doc = etree.parse(fp)
print(etree.tostring(transform(doc), pretty_print=True))
| 27.533333 | 76 | 0.639225 |
b037c4f526f6d6afd8598b5e5a8cb64d9cc7462a | 7,122 | py | Python | docs/conf.py | vlukes/io3d | 34d048b7f737a5e56610879f6ab103128e8f0750 | [
"MIT"
] | 8 | 2016-09-26T01:35:15.000Z | 2022-02-23T04:05:23.000Z | docs/conf.py | vlukes/io3d | 34d048b7f737a5e56610879f6ab103128e8f0750 | [
"MIT"
] | 4 | 2016-05-18T11:04:56.000Z | 2018-10-24T11:03:03.000Z | docs/conf.py | vlukes/io3d | 34d048b7f737a5e56610879f6ab103128e8f0750 | [
"MIT"
] | 6 | 2017-03-24T20:43:21.000Z | 2021-08-23T06:05:34.000Z | # -*- coding: utf-8 -*-
#
# io3d documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 27 12:01:57 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
# mock
import mock
MOCK_MODULES = [
"numpy",
"scipy",
"matplotlib",
"matplotlib.pyplot",
"matplotlib.widgets",
"scipy.io",
"yaml",
"pydicom",
# 'scipy.interpolate', 'scipy.ndimage', 'pycut', 'io3d', 'sed3', 'pysegbase',
# 'pysegbase.pycut', 'sklearn', 'skimage', 'dicom', 'vtk', 'vtk.util',
# 'larcc', 'larcc.VIEW', 'larcc.MKPOL', 'larcc.AA', 'larcc.INTERVALS',
# 'larcc.MAP',
"PyQt5",
"PyQt5.QtCore",
"PyQt5.QtGui", #'web', 'lar2psm',
# 'scipy.ndimage.measurements', 'lar', 'extern.lar', 'splines',
# 'scipy.sparse', 'skimage.filter', 'mapper', 'skelet3d', 'numpy.core',
# 'skimage.filters', 'skimage.restoration','skimage.io',
# 'gzip', 'cPickle',
# 'lbpLibrary', 'skimage.exposure', 'PyQt4.QVTKRenderWindowInteractor',
# 'matplotlib.backends', 'matplotlib.backends.backend_qt4agg', 'numpy.linalg',
# 'PyQt4.Qt', 'matplotlib.figure', 'skimage.morphology', 'gtk',
# 'pysegbase.seed_editor_qt', 'vtk.qt4', 'vtk.qt4.QVTKRenderWindowInteractor',
# 'seg2fem', 'skimage.segmentation', 'skimage.transform', 'matplotlib.patches', 'skimage.feature',
# 'scipy.ndimage.morphology', 'mpl_toolkits', 'mpl_toolkits.mplot3d',
# 'scipy.ndimage.measurement', 'scipy.ndimage.interpolation',
# 'matplotlib.backends.backend_gtkagg', 'cv2', 'skimage.measure', 'dicom2fem',
# 'morphsnakes', 'scipy.ndimage.filters', 'scipy.signal', 'pandas',
# 'scipy.stats', 'io3d.misc', 'lisa.extern.lar', 'scipy.cluster',
# 'scipy.cluster.vq', 'scipy.cluster.vq',
# 'ipdb', 'multipolyfit', 'PIL', 'yaml',
"SimpleITK",
# 'six', 'nearpy', 'SimpleITK', 'lar', 'pandas'
"ruamel.yaml.YAML",
]
#
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# import sklearn
# sklearn.__version__ = '0.0'
# import scipy
# scipy.__version__ = '0.0'
# import pysegbase.pycut
# pysegbase.pycut.methods = ['graphcut']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"io3d"
copyright = u"2017, Miroslav Jirik"
author = u"Miroslav Jirik"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"1.2.3"
# The full version, including alpha/beta/rc tags.
release = u"1.2.3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "io3ddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "io3d.tex", u"io3d Documentation", u"Miroslav Jirik", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "io3d", u"io3d Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"io3d",
u"io3d Documentation",
author,
"io3d",
"One line description of project.",
"Miscellaneous",
)
]
| 32.226244 | 102 | 0.664139 |
b038cebedd15245004a4a13444cb7f55e363f2e8 | 33,401 | py | Python | EVB.py | yunzhe-zhou/CS285-Project | e6aca061e27d2794949d4419339120107a6cb8f7 | [
"MIT"
] | null | null | null | EVB.py | yunzhe-zhou/CS285-Project | e6aca061e27d2794949d4419339120107a6cb8f7 | [
"MIT"
] | null | null | null | EVB.py | yunzhe-zhou/CS285-Project | e6aca061e27d2794949d4419339120107a6cb8f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""RED_linear_run1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-WN1MY9YYluGcnigLgrndqsxcOYldbB6
"""
#@title mount your Google Drive
#@markdown Your work will be stored in a folder called `cs285_f2021` by default to prevent Colab instance timeouts from deleting your edits.
import os
from google.colab import drive
drive.mount('/content/gdrive')
# Commented out IPython magic to ensure Python compatibility.
#@title set up mount symlink
DRIVE_PATH = '/content/gdrive/My\ Drive/cs285_project'
DRIVE_PYTHON_PATH = DRIVE_PATH.replace('\\', '')
if not os.path.exists(DRIVE_PYTHON_PATH):
# %mkdir $DRIVE_PATH
## the space in `My Drive` causes some issues,
## make a symlink to avoid this
SYM_PATH = '/content/cs285_project'
if not os.path.exists(SYM_PATH):
!ln -s $DRIVE_PATH $SYM_PATH
!apt update
!apt install -y --no-install-recommends \
build-essential \
curl \
git \
gnupg2 \
make \
cmake \
ffmpeg \
swig \
libz-dev \
unzip \
zlib1g-dev \
libglfw3 \
libglfw3-dev \
libxrandr2 \
libxinerama-dev \
libxi6 \
libxcursor-dev \
libgl1-mesa-dev \
libgl1-mesa-glx \
libglew-dev \
libosmesa6-dev \
lsb-release \
ack-grep \
patchelf \
wget \
xpra \
xserver-xorg-dev \
xvfb \
python-opengl \
ffmpeg
# Commented out IPython magic to ensure Python compatibility.
#@title download mujoco
MJC_PATH = '{}/mujoco'.format(SYM_PATH)
# %mkdir $MJC_PATH
# %cd $MJC_PATH
!wget -q https://www.roboti.us/download/mujoco200_linux.zip
!unzip -q mujoco200_linux.zip
# %mv mujoco200_linux mujoco200
# %rm mujoco200_linux.zip
#@title update mujoco paths
import os
os.environ['LD_LIBRARY_PATH'] += ':{}/mujoco200/bin'.format(MJC_PATH)
os.environ['MUJOCO_PY_MUJOCO_PATH'] = '{}/mujoco200'.format(MJC_PATH)
os.environ['MUJOCO_PY_MJKEY_PATH'] = '{}/mjkey.txt'.format(MJC_PATH)
## installation on colab does not find *.so files
## in LD_LIBRARY_PATH, copy over manually instead
!cp $MJC_PATH/mujoco200/bin/*.so /usr/lib/x86_64-linux-gnu/
# Commented out IPython magic to ensure Python compatibility.
# %cd $MJC_PATH
!git clone https://github.com/openai/mujoco-py.git
# %cd mujoco-py
# %pip install -e .
## cythonize at the first import
import mujoco_py
# Commented out IPython magic to ensure Python compatibility.
# %cd $SYM_PATH
# %cd RED
# %tensorflow_version 1.x
! pip install mpi4py
'''
Disclaimer: this code is highly based on trpo_mpi at @openai/baselines and @openai/imitation
'''
import argparse
import os.path as osp
import logging
from mpi4py import MPI
from tqdm import tqdm
import numpy as np
import gym
from baselines.rnd_gail import mlp_policy
from baselines.common import set_global_seeds, tf_util as U
from baselines.common.misc_util import boolean_flag
from baselines import bench
from baselines import logger
from baselines.rnd_gail.merged_critic import make_critic
import pickle
Log_dir = osp.expanduser("~/workspace/log/mujoco")
Checkpoint_dir = osp.expanduser("~/workspace/checkpoint/mujoco")
parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL")
parser.add_argument('--env_id', help='environment ID', default="Hopper-v2")
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--checkpoint_dir', help='the directory to save model', default=Checkpoint_dir)
parser.add_argument('--log_dir', help='the directory to save log file', default=Log_dir)
parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
# Task
parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train')
# for evaluatation
boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')
# Optimization Configuration
parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=3)
parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1)
# Network Configuration (Using MLP Policy)
parser.add_argument('--policy_hidden_size', type=int, default=100)
parser.add_argument('--adversary_hidden_size', type=int, default=100)
# Algorithms Configuration
parser.add_argument('--max_kl', type=float, default=0.01)
parser.add_argument('--policy_entcoeff', help='entropy coefficiency of policy', type=float, default=0)
parser.add_argument('--adversary_entcoeff', help='entropy coefficiency of discriminator', type=float, default=1e-3)
# Traing Configuration
parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=5e6)
# Behavior Cloning
boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain')
boolean_flag(parser, 'fixed_var', default=False, help='Fixed policy variance')
parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=20)
parser.add_argument('--gamma', help='Discount factor', type=float, default=0.97)
boolean_flag(parser, 'popart', default=True, help='Use popart on V function')
parser.add_argument('--reward', help='Reward Type', type=int, default=0)
args = parser.parse_args(args=[])
set_global_seeds(args.seed)
env = gym.make(args.env_id)
env.seed(args.seed)
# env = bench.Monitor(env, logger.get_dir() and
# osp.join(logger.get_dir(), "monitor.json"))
gym.logger.setLevel(logging.WARN)
if args.log_dir != Log_dir:
log_dir = osp.join(Log_dir, args.log_dir)
save_dir = osp.join(Checkpoint_dir, args.log_dir)
else:
log_dir = Log_dir
save_dir = Checkpoint_dir
args, rnd_iter, dyn_norm = modify_args(args)
exp_data = get_exp_data("/content/gdrive/My Drive/cs285_project/RED/data/Hopper-v2.pkl")
task_name = get_task_name(args)
logger.configure(dir=log_dir, log_suffix=task_name, format_strs=["log", "stdout"])
import numpy as np
import tensorflow as tf
from baselines.common import tf_util as U
from baselines.common.dataset import iterbatches
from baselines import logger
hid_size=128
rnd_hid_size=128
reward_type=0
scale=250000
reward_type=args.reward
ac_size = env.action_space.sample().shape[0]
ob_size = env.observation_space.shape[0]
# linear model to estimate variance
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
np.random.seed(1)
# randomly create a oracle linear model to estimate
param = np.random.normal(0,1,14).reshape([-1,1])
# calculate response under this oracle model
Y = np.matmul(X,param).flatten() + np.random.normal(0,1,X.shape[0])
# estimate the linear model
beta_hat = np.matmul(np.linalg.inv(np.matmul(X.T,X)),np.matmul(X.T,Y))
# estimate varaince
sigma_hat = np.sqrt(np.sum((Y-np.matmul(X,beta_hat))**2)/(X.shape[0]-14))
# calculate a matrix for later use
W = np.linalg.inv(np.matmul(X.T,X))
critic = RND_Critic_Revise(W, sigma_hat, ob_size, ac_size, hid_size=hid_size, rnd_hid_size=rnd_hid_size, scale=scale)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.ndimage.filters import gaussian_filter
X1 = exp_data[0]
X2 = exp_data[1]
generate_density_plot(X2[:,0],X2[:,1])
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
N=100
side = np.linspace(-4, 6, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ac[0] = x[0,i]
ac[1] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
plt.pcolormesh(x, y, z, shading='auto')
plt.show()
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
N=100
side = np.linspace(-1, 3, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ob[0] = x[0,i]
ob[1] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
plt.pcolormesh(x, y, z, shading='auto')
plt.show()
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
N=100
side = np.linspace(-1, 3, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ob[2] = x[0,i]
ob[4] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
plt.pcolormesh(x, y, z, shading='auto')
plt.show()
np.max(X,0)
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
fig,ax=plt.subplots(nrows=5,ncols=3,figsize=(10,12))
axes = ax.flatten()
N=100
count = 0
for p in range(6):
for q in range(6):
if p<q:
max_value = np.max([np.max(X,0)[p],np.max(X,0)[q]])+1
min_value = np.max([np.min(X,0)[p],np.min(X,0)[q]])-1
side = np.linspace(min_value, max_value, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ob[p] = x[0,i]
ob[q] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
## figure 1.1
axes[count].pcolormesh(x, y, z, shading='auto')
axes[count].set_title("dim "+str(p+1) + " vs " + "dim " + str(q+1), fontsize=14)
count = count +1
plt.show()
import numpy as np
import matplotlib.pyplot as plt
fig,ax=plt.subplots(nrows=2,ncols=2,figsize=(10,8))
axes = ax.flatten()
## figure 1.1
axes[0].pcolormesh(x, y, z, shading='auto')
plt.show()
plt.rcParams["figure.figsize"] = [4.50, 3.50]
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
critic.get_reward(ob,ac)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
reward_ls = []
node = 0
for i in range(100):
ac[node] = i * 0.1 - 5
reward_ls.append(critic.get_reward(ob,ac).flatten()[0])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(range(len(reward_ls)))/10 - 5
plt.plot(x, reward_ls,color="limegreen",linestyle='-', markersize=7)
plt.xlabel('Value of the First Dimension of Action', fontsize=12)
plt.ylabel('Reward', fontsize=12)
plt.tight_layout(pad=4)
# plt.title("Linear Model Variance Estimation Based Reward Function \n (Change the First Dimension of Action)")
plt.show()
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
reward_ls = []
node = 1
for i in range(100):
ac[node] = i * 0.1 - 5
reward_ls.append(critic.get_reward(ob,ac).flatten()[0])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(range(len(reward_ls)))/10 - 5
plt.plot(x, reward_ls,color="limegreen",linestyle='-', markersize=7)
plt.xlabel('Value of the Second Dimension of Action', fontsize=12)
plt.ylabel('Reward', fontsize=12)
plt.tight_layout(pad=4)
# plt.title("Linear Model Variance Estimation Based Reward Function \n (Change the Second Dimension of Action)")
plt.show()
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
reward_ls = []
node = 2
for i in range(100):
ac[node] = i * 0.1 - 5
reward_ls.append(critic.get_reward(ob,ac).flatten()[0])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(range(len(reward_ls)))/10 - 5
plt.plot(x, reward_ls,color="limegreen",linestyle='-', markersize=7)
plt.xlabel('Value of the Third Dimension of Action', fontsize=12)
plt.ylabel('Reward', fontsize=12)
plt.tight_layout(pad=4)
plt.title("Linear Model Variance Estimation Based Reward Function \n (Change the Third Dimension of Action)")
plt.show()
# X1 = exp_data[0]
# X2 = exp_data[1]
# X = np.concatenate([X1,X2],axis=1)
# np.random.seed(1)
# param = np.random.normal(0,1,14).reshape([-1,1])
# Y = np.matmul(X,param).flatten() + np.random.normal(0,1,X.shape[0])
# beta_hat = np.matmul(np.linalg.inv(np.matmul(X.T,X)),np.matmul(X.T,Y))
# sigma_hat = np.sqrt(np.sum((Y-np.matmul(X,beta_hat))**2)/(X.shape[0]-14))
# W = np.linalg.inv(np.matmul(X.T,X))
# scale = 5
# x = np.ones(14).reshape([-1,1])
# var = sigma_hat*np.sqrt(np.matmul(np.matmul(x.T,W),x))*scale
# reward_return = np.exp(-var**2)
# print("var: ", var)
# print("reward: ", reward_return)
# x = X[1,:].reshape([-1,1])
# var = sigma_hat*np.sqrt(np.matmul(np.matmul(x.T,W),x))*scale
# reward_return = np.exp(-var**2)
# print("var: ", var)
# print("reward: ", reward_return)
seed = args.seed
reward_giver = critic
dataset = exp_data
g_step = args.g_step
d_step = args.d_step
policy_entcoeff = args.policy_entcoeff
num_timesteps = args.num_timesteps
checkpoint_dir = save_dir
pretrained = args.pretrained
BC_max_iter = args.BC_max_iter
gamma = args.gamma
pretrained_weight = None
from baselines.rnd_gail import trpo_mpi
# Set up for MPI seed
rank = MPI.COMM_WORLD.Get_rank()
if rank != 0:
logger.set_level(logger.DISABLED)
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
set_global_seeds(workerseed)
env.seed(workerseed)
import time
import os
from contextlib import contextmanager
from mpi4py import MPI
from collections import deque
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from baselines.common import explained_variance, zipsame, dataset, fmt_row
from baselines import logger
from baselines.common.mpi_adam import MpiAdam
from baselines.common.cg import cg
from baselines.gail.statistics import stats
from baselines.common.dataset_plus import iterbatches
env = env
policy_func = policy_fn
reward_giver = reward_giver
expert_dataset = exp_data
rank =rank
pretrained = pretrained
pretrained_weight = pretrained_weight
g_step = g_step
d_step = d_step
entcoeff = policy_entcoeff
max_timesteps=num_timesteps
ckpt_dir=checkpoint_dir
timesteps_per_batch=1024
max_kl=args.max_kl
cg_iters=10
cg_damping=0.1
gamma=gamma
lam=0.97
vf_iters=5
vf_stepsize=1e-3
d_stepsize=3e-4
task_name=task_name
rnd_iter=rnd_iter
dyn_norm=dyn_norm
mmd=args.reward==2
max_iters=0
callback=None
max_episodes=0
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_func("pi", ob_space, ac_space)
oldpi = policy_func("oldpi", ob_space, ac_space)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = entcoeff * meanent
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = pi.get_trainable_variables()
var_list = [v for v in all_var_list if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")]
vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")]
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = pi.vlossandgrad
U.initialize()
th_init = get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
vfadam.sync()
if rank == 0:
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, reward_giver, timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
true_rewbuffer = deque(maxlen=40)
assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1
ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
# if provide pretrained weight
if pretrained_weight is not None:
U.load_variables(pretrained_weight, variables=pi.get_variables())
else:
if not dyn_norm:
pi.ob_rms.update(expert_dataset[0])
if not mmd:
reward_giver.train(*expert_dataset, iter=rnd_iter)
best = -2000
save_ind = 0
max_save = 3
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
logger.log("********** Iteration %i ************" % iters_so_far)
# ------------------ Update G ------------------
# logger.log("Optimizing Policy...")
for _ in range(g_step):
seg = seg_gen.__next__()
#mmd reward
if mmd:
reward_giver.set_b2(seg["ob"], seg["ac"])
seg["rew"] = reward_giver.get_reward(seg["ob"], seg["ac"])
#report stats and save policy if any good
lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
true_rewbuffer.extend(true_rets)
lenbuffer.extend(lens)
rewbuffer.extend(rews)
true_rew_avg = np.mean(true_rewbuffer)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpTrueRewMean", true_rew_avg)
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
logger.record_tabular("Best so far", best)
# Save model
if ckpt_dir is not None and true_rew_avg >= best:
best = true_rew_avg
fname = os.path.join(ckpt_dir, task_name)
os.makedirs(os.path.dirname(fname), exist_ok=True)
pi.save_policy(fname+"_"+str(save_ind))
save_ind = (save_ind+1) % max_save
#compute gradient towards next policy
add_vtarg_and_adv(seg, gamma, lam)
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before udpate
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(pi, "ob_rms") and dyn_norm: pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
assign_old_eq_new() # set old parameter values to new parameter values
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=False)
assert np.isfinite(stepdir).all()
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
if pi.use_popart:
pi.update_popart(tdlamret)
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=128):
if hasattr(pi, "ob_rms") and dyn_norm:
pi.ob_rms.update(mbob) # update running mean/std for policy
vfadam.update(allmean(compute_vflossandgrad(mbob, mbret)), vf_stepsize)
g_losses = meanlosses
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
if rank == 0:
logger.dump_tabular()
| 32.586341 | 143 | 0.632945 |
b03a815221b3f33cdcf33d82406be159b843f64d | 2,096 | py | Python | School-Management-System/teachers/views.py | GisaKaze/Python-Quarantine-Projects | 29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4 | [
"MIT"
] | null | null | null | School-Management-System/teachers/views.py | GisaKaze/Python-Quarantine-Projects | 29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4 | [
"MIT"
] | null | null | null | School-Management-System/teachers/views.py | GisaKaze/Python-Quarantine-Projects | 29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404, redirect
from .models import TeacherInfo
from .forms import CreateTeacher
from django.contrib import messages
from django.core.paginator import Paginator
# Create your views here.
| 30.376812 | 102 | 0.705153 |
b03af16df806f7a2f213bb90c1c62ae5588655f0 | 4,326 | py | Python | runner_service/controllers/jobs.py | tonykhbo/ansible-runner-service | 200bd9aa67fc0fd66a4425cfb38a2ac3aed4d4b2 | [
"Apache-2.0"
] | 174 | 2018-11-21T07:44:50.000Z | 2022-03-04T15:11:56.000Z | runner_service/controllers/jobs.py | tonykhbo/ansible-runner-service | 200bd9aa67fc0fd66a4425cfb38a2ac3aed4d4b2 | [
"Apache-2.0"
] | 76 | 2018-12-12T17:20:37.000Z | 2021-12-06T11:15:47.000Z | runner_service/controllers/jobs.py | tonykhbo/ansible-runner-service | 200bd9aa67fc0fd66a4425cfb38a2ac3aed4d4b2 | [
"Apache-2.0"
] | 61 | 2018-12-27T15:17:38.000Z | 2022-03-04T12:29:33.000Z | # from flask import request
from flask_restful import request
# import logging
from .utils import log_request
from .base import BaseResource
from ..services.jobs import get_events, get_event
from ..services.utils import APIResponse
import logging
logger = logging.getLogger(__name__)
| 32.772727 | 184 | 0.502312 |
b0433121aa8bbd1327d3221055a476dfcaf07db3 | 136 | py | Python | case3/test_calc.py | emre/unit-test-workshop | 6a323dd7ffac08e7aa56e09d307798d4ae984fa9 | [
"MIT"
] | 1 | 2017-11-20T18:15:12.000Z | 2017-11-20T18:15:12.000Z | case3/test_calc.py | emre/unit-test-workshop | 6a323dd7ffac08e7aa56e09d307798d4ae984fa9 | [
"MIT"
] | null | null | null | case3/test_calc.py | emre/unit-test-workshop | 6a323dd7ffac08e7aa56e09d307798d4ae984fa9 | [
"MIT"
] | null | null | null | import unittest
# https://docs.python.org/3/library/unittest.html
from calc import Calc
| 13.6 | 49 | 0.757353 |
b043e0116441bcee9ae6a5419079e591b49e7c1e | 3,267 | py | Python | tests/service/test_integer_converter_service.py | NeolithEra/WavesGatewayFramework | e7ba892427e1d0444f2bfdc2922c45ff5f4c4add | [
"MIT"
] | 25 | 2018-03-04T07:49:21.000Z | 2022-03-28T05:20:50.000Z | tests/service/test_integer_converter_service.py | NeolithEra/WavesGatewayFramework | e7ba892427e1d0444f2bfdc2922c45ff5f4c4add | [
"MIT"
] | 22 | 2018-03-25T13:19:45.000Z | 2020-11-28T17:21:08.000Z | tests/service/test_integer_converter_service.py | NeolithEra/WavesGatewayFramework | e7ba892427e1d0444f2bfdc2922c45ff5f4c4add | [
"MIT"
] | 31 | 2018-03-25T09:45:13.000Z | 2022-03-24T05:32:18.000Z | import unittest
from unittest.mock import patch
from waves_gateway.model import Transaction, TransactionReceiver
from waves_gateway.service import IntegerConverterService
| 37.551724 | 94 | 0.67034 |
b044475c3b8a25898a8527a87ed6dc1d9dadbb1d | 6,670 | py | Python | live_demo.py | GerryZhang7/ASL-Translator- | 3963311d8dd1f010ee5a19b3760b451bc287ab1e | [
"MIT"
] | null | null | null | live_demo.py | GerryZhang7/ASL-Translator- | 3963311d8dd1f010ee5a19b3760b451bc287ab1e | [
"MIT"
] | null | null | null | live_demo.py | GerryZhang7/ASL-Translator- | 3963311d8dd1f010ee5a19b3760b451bc287ab1e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LIVE DEMO
This script loads a pre-trained model (for best results use pre-trained weights for classification block)
and classifies American Sign Language finger spelling frame-by-frame in real-time
"""
import string
import cv2
import time
from processing import square_pad, preprocess_for_vgg
from model import create_model
import argparse
import numpy as np
# Command-line interface: the base network name is mandatory, weights optional.
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", default=None,
                help="path to the model weights")
required_ap = ap.add_argument_group('required arguments')
required_ap.add_argument("-m", "--model",
                         type=str, default="resnet", required=True,
                         help="name of pre-trained network to use")
args = vars(ap.parse_args())
# ====== Create model for real-time classification ======
# =======================================================
# Map model names to classes
MODELS = ["resnet", "vgg16", "inception", "xception", "mobilenet"]
if args["model"] not in MODELS:
    raise AssertionError("The --model command line argument should be a key in the `MODELS` dictionary")
# Create pre-trained model + classification block, with or without pre-trained weights
my_model = create_model(model=args["model"],
                        model_weights_path=args["weights"])
# Dictionary to convert numerical classes to alphabet
label_dict = {pos: letter
              for pos, letter in enumerate(string.ascii_uppercase)}
# ====================== Live loop ======================
# =======================================================
# Webcam handle; device index 0 is the default camera.
video_capture = cv2.VideoCapture(0)
#if not video_capture.isOpened():
#    raise Exception("Could not open video device")
# Set properties. Each returns === True on success (i.e. correct resolution)
# (oversized requests are clamped by the driver to the camera's maximum).
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 5000)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 5000)
# Hard-coded snapshot path -- presumably Windows-specific; TODO confirm before reuse.
path = "C:/Users/Desktop/splash.jpg"
img = cv2.imread(path)
imgWrite = np.zeros((512, 512, 3), np.uint8)
# flag1..flag3 latch to 1 once each word of the demo phrase has been unlocked.
flag1 = 0
flag2 = 0
flag3 = 0
fps = 0    # frame counter for the FPS report printed at shutdown
i = 0      # index into the canned prediction string
timer = 0  # frames since the last accepted prediction (debounce counter)
start = time.time()
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    fps += 1
    timer += 1
    # Draw rectangle around face
    # (fixed region of interest where the user is expected to sign)
    x = 313
    y = 82
    w = 451
    h = 568
    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 3)
    # Crop + process captured frame
    hand = frame[83:650, 314:764]
    #hand = frame[0:1000, 0:1000]
    hand = square_pad(hand)
    hand = preprocess_for_vgg(hand)
    # Make prediction
    my_predict = my_model.predict(hand,
                                  batch_size=1,
                                  verbose=0)
    # Predict letter
    top_prd = np.argmax(my_predict)
    # Re-draw any words that were already unlocked on previous frames.
    if (flag1 == 1):
        cv2.putText(frame, text="hi ",
                    org=(50, (560 + 240)),
                    fontFace=cv2.FONT_HERSHEY_PLAIN,
                    fontScale=6, color=(0, 0, 255),
                    thickness=6, lineType=cv2.LINE_AA)
    if (flag2 == 1):
        cv2.putText(frame, text="im ",
                    org=(185, (560 + 240)),
                    fontFace=cv2.FONT_HERSHEY_PLAIN,
                    fontScale=6, color=(0, 0, 255),
                    thickness=6, lineType=cv2.LINE_AA)
    if (flag3 == 1):
        cv2.putText(frame, text="good",
                    org=(300, (560 + 240)),
                    fontFace=cv2.FONT_HERSHEY_PLAIN,
                    fontScale=6, color=(0, 0, 255),
                    thickness=6, lineType=cv2.LINE_AA)
        # Large negative debounce: once the whole phrase is shown, hold off.
        timer = -50
    # Only display predictions with probabilities greater than 0.5
    #if np.max(my_predict) >= 0.50:
    #if timer >= 15:
    if np.max(my_predict) >= 0.9925 and timer >= 12:
        timer = 0;
        prediction_result = "hi im good"
        #prediction_result = label_dict[top_prd]
        preds_list = np.argsort(my_predict)[0]
        #pred_2 = label_dict[preds_list[-2]]
        #pred_3 = label_dict[preds_list[-3]]
        width = int(video_capture.get(3) + 0.5)
        height = int(video_capture.get(4) + 0.5)
        # Annotate image with most probable prediction
        # (indices 2, 5 and 10 are the spaces of "hi im good")
        if i != 2 and i != 5 and i != 10:
            cv2.putText(frame, text=prediction_result[i],
                        org=(width // 2 + 230, height // 2 + 75),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=17, color=(255, 255, 0),
                        thickness=15, lineType=cv2.LINE_AA)
        elif i == 2:
            cv2.putText(frame, text="[space]",
                        org=(width // 2 + 230, height // 2 + 75),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=5, color=(255, 255, 0),
                        thickness=15, lineType=cv2.LINE_AA)
            flag1 = 1
            #cv2.imshow("img", img)
            #cv2.imwrite("splash.jpg", img)
            #cv2.waitKey(0)
        elif i == 5:
            cv2.putText(frame, text="[space]",
                        org=(width // 2 + 230, height // 2 + 75),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=5, color=(255, 255, 0),
                        thickness=15, lineType=cv2.LINE_AA)
            flag2 = 1
            cv2.imwrite(path, frame)
        elif i == 10:
            cv2.putText(frame, text="[space]",
                        org=(width // 2 + 230, height // 2 + 75),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=5, color=(255, 255, 0),
                        thickness=15, lineType=cv2.LINE_AA)
            flag3 = 1
        i = (i+1) % (len(prediction_result)+1)
    # Annotate image with second most probable prediction (displayed on bottom left)
    '''cv2.putText(frame, text=pred_2,
                org=(width // 2 + width // 5 + 40, (360 + 240)),
                fontFace=cv2.FONT_HERSHEY_PLAIN,
                fontScale=6, color=(0, 0, 255),
                thickness=6, lineType=cv2.LINE_AA)
    # Annotate image with third probable prediction (displayed on bottom right)
    cv2.putText(frame, text=pred_3,
                org=(width // 2 + width // 3 + 5, (360 + 240)),
                fontFace=cv2.FONT_HERSHEY_PLAIN,
                fontScale=6, color=(0, 0, 255),
                thickness=6, lineType=cv2.LINE_AA)'''
    # Display the resulting frame
    cv2.imshow('Video', frame)
    # Press 'q' to exit live loop
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
# Calculate frames per second
end = time.time()
FPS = fps/(end-start)
print("[INFO] approx. FPS: {:.2f}".format(FPS))
# Release the capture
video_capture.release()
cv2.destroyAllWindows()
| 33.517588 | 105 | 0.553373 |
b044b434998843e21fedc472b72d6aa6d023641a | 8,770 | py | Python | prob2020/python/gene_sequence.py | KarchinLab/probabilistic2020 | 8e0b1b9578bd8189b1690dd2f17476c3305b98dc | [
"Apache-2.0"
] | 8 | 2016-04-30T03:26:40.000Z | 2021-09-17T04:47:08.000Z | prob2020/python/gene_sequence.py | KarchinLab/probabilistic2020 | 8e0b1b9578bd8189b1690dd2f17476c3305b98dc | [
"Apache-2.0"
] | 9 | 2016-08-18T15:19:04.000Z | 2019-07-17T18:16:52.000Z | prob2020/python/gene_sequence.py | KarchinLab/probabilistic2020 | 8e0b1b9578bd8189b1690dd2f17476c3305b98dc | [
"Apache-2.0"
] | 7 | 2016-10-19T03:43:42.000Z | 2021-07-31T02:40:20.000Z | """Fetches gene sequence from gene fasta created by extract_genes.py"""
import prob2020.python.utils as utils
def _fetch_5ss_fasta(fasta, gene_name, exon_num,
                     chrom, strand, start, end):
    """Fetch the 5' splice-site (donor) window flanking one exon.

    A four-base window spanning the exon/intron junction on the donor side
    is extracted and wrapped in a two-line FASTA record whose header encodes
    the gene, exon index, and splice-site kind. On the '-' strand the window
    is taken from the opposite genomic side and reverse-complemented so the
    reported sequence always reads 5'->3'.

    Parameters
    ----------
    fasta : pysam.Fastafile
        indexed fasta handle used for sequence retrieval
    gene_name : str
        gene name used for the fasta seq id
    exon_num : int
        index of the exon, used for the seq id
    chrom : str
        chromosome name
    strand : str
        strand, {'+', '-'}
    start : int
        0-based exon start position
    end : int
        0-based exon end position

    Returns
    -------
    str
        fasta-formatted record (header line followed by the sequence line)
    """
    if strand == '+':
        donor_window = fasta.fetch(reference=chrom, start=end - 1, end=end + 3)
    elif strand == '-':
        donor_window = utils.rev_comp(
            fasta.fetch(reference=chrom, start=start - 3, end=start + 1))
    ss_fasta = '>{0};exon{1};5SS\n{2}\n'.format(gene_name,
                                                exon_num,
                                                donor_window.upper())
    return ss_fasta
def _fetch_3ss_fasta(fasta, gene_name, exon_num,
                     chrom, strand, start, end):
    """Fetch the 3' splice-site (acceptor) window flanking one exon.

    Mirror image of ``_fetch_5ss_fasta``: a four-base window spanning the
    intron/exon junction on the acceptor side is extracted and wrapped in a
    two-line FASTA record. On the '-' strand the window comes from the other
    genomic side and is reverse-complemented so it reads 5'->3'.

    Parameters
    ----------
    fasta : pysam.Fastafile
        indexed fasta handle used for sequence retrieval
    gene_name : str
        gene name used for the fasta seq id
    exon_num : int
        index of the exon, used for the seq id
    chrom : str
        chromosome name
    strand : str
        strand, {'+', '-'}
    start : int
        0-based exon start position
    end : int
        0-based exon end position

    Returns
    -------
    str
        fasta-formatted record (header line followed by the sequence line)
    """
    if strand == '-':
        acceptor_window = utils.rev_comp(
            fasta.fetch(reference=chrom, start=end - 1, end=end + 3))
    elif strand == '+':
        acceptor_window = fasta.fetch(reference=chrom, start=start - 3, end=start + 1)
    ss_fasta = '>{0};exon{1};3SS\n{2}\n'.format(gene_name,
                                                exon_num,
                                                acceptor_window.upper())
    return ss_fasta
def fetch_gene_fasta(gene_bed, fasta_obj):
    """Assemble the FASTA text for a gene: one record per exon, plus the
    splice-site records that flank each exon of a multi-exon gene.

    Parameters
    ----------
    gene_bed : BedLine
        BedLine object describing a single gene
    fasta_obj : pysam.Fastafile
        indexed fasta used for sequence retrieval

    Returns
    -------
    str
        concatenated FASTA records for all exons (and their splice sites)
    """
    strand = gene_bed.strand
    exons = gene_bed.get_exons()
    if strand == '-':
        # walk exons 5' -> 3', so flip the genomic order on the '-' strand
        exons.reverse()
    pieces = []
    last_ix = len(exons) - 1
    for ix, exon in enumerate(exons):
        seq = fasta_obj.fetch(reference=gene_bed.chrom,
                              start=exon[0],
                              end=exon[1]).upper()
        if strand == '-':
            seq = utils.rev_comp(seq)
        pieces.append('>{0};exon{1}\n{2}\n'.format(gene_bed.gene_name,
                                                   ix, seq))
        if last_ix == 0:
            # single-exon gene: no splicing, so no splice-site records
            continue
        if ix != last_ix:
            # every exon except the last contributes a 5' SS record
            pieces.append(_fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, ix,
                                           gene_bed.chrom, strand,
                                           exon[0], exon[1]))
        if ix != 0:
            # every exon except the first contributes a 3' SS record
            pieces.append(_fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, ix,
                                           gene_bed.chrom, strand,
                                           exon[0], exon[1]))
    return ''.join(pieces)
| 34.801587 | 89 | 0.55382 |
b04538316ec8e7dec6961b4c00010c7027a8e97d | 1,118 | py | Python | src/main/python/request/http_request.py | photowey/pytest-dynamic-framework | 4e7b6d74594191006b50831d42e7aae21e154d56 | [
"Apache-2.0"
] | null | null | null | src/main/python/request/http_request.py | photowey/pytest-dynamic-framework | 4e7b6d74594191006b50831d42e7aae21e154d56 | [
"Apache-2.0"
] | null | null | null | src/main/python/request/http_request.py | photowey/pytest-dynamic-framework | 4e7b6d74594191006b50831d42e7aae21e154d56 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# ---------------------------------------------
# @file http_request
# @description http_request
# @author WcJun
# @date 2021/07/19
# ---------------------------------------------
from src.main.python.request.options import RequestOptions
| 29.421053 | 92 | 0.573345 |
b04682256b68f1be1d146f950d4cf5cacbc05399 | 5,728 | py | Python | bot/helper/mirror_utils/download_utils/aria2_download.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | [
"Apache-2.0"
] | null | null | null | bot/helper/mirror_utils/download_utils/aria2_download.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | [
"Apache-2.0"
] | null | null | null | bot/helper/mirror_utils/download_utils/aria2_download.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | [
"Apache-2.0"
] | null | null | null | from time import sleep
from threading import Thread
from bot import aria2, download_dict_lock, download_dict, STOP_DUPLICATE, TORRENT_DIRECT_LIMIT, ZIP_UNZIP_LIMIT, LOGGER, STORAGE_THRESHOLD
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import is_magnet, getDownloadByGid, new_thread, get_readable_file_size
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import sendMarkup, sendStatusMessage, sendMessage
from bot.helper.ext_utils.fs_utils import get_base_name, check_storage_threshold
def start_listener():
    """Subscribe to aria2's notification stream on a background thread.

    Wires the module-level download lifecycle callbacks into aria2;
    ``threaded=True`` keeps the listener off the caller's thread.
    """
    callbacks = {
        'on_download_start': __onDownloadStarted,
        'on_download_error': __onDownloadError,
        'on_download_stop': __onDownloadStopped,
        'on_download_complete': __onDownloadComplete,
    }
    aria2.listen_to_notifications(threaded=True, timeout=20, **callbacks)
def add_aria2c_download(link: str, path, listener, filename):
    """Queue a download (magnet or direct URI) on aria2 and register it.

    On an aria2-reported error the sanitized message is sent back to the
    requesting chat instead of registering the download; otherwise the
    download is tracked in ``download_dict`` and the notification listener
    is (re)started.
    """
    options = {'dir': path, 'out': filename}
    if is_magnet(link):
        download = aria2.add_magnet(link, options)
    else:
        download = aria2.add_uris([link], options)
    error_message = download.error_message
    if error_message:
        # Strip angle brackets so the text is safe for the chat client.
        error = str(error_message).replace('<', ' ').replace('>', ' ')
        LOGGER.info(f"Download Error: {error}")
        return sendMessage(error, listener.bot, listener.message)
    with download_dict_lock:
        download_dict[listener.uid] = AriaDownloadStatus(download.gid, listener)
    LOGGER.info(f"Started: {download.gid} DIR: {download.dir} ")
    sendStatusMessage(listener.message, listener.bot)
    start_listener()
| 46.569106 | 138 | 0.618191 |
b047b2781fee7bef3205107d3cc7277c6707a880 | 3,407 | py | Python | gol.py | AjayMT/game-of-life | 681bb92e1d7c0644645af7b77f0106ba2d4c9c20 | [
"MIT"
] | null | null | null | gol.py | AjayMT/game-of-life | 681bb92e1d7c0644645af7b77f0106ba2d4c9c20 | [
"MIT"
] | null | null | null | gol.py | AjayMT/game-of-life | 681bb92e1d7c0644645af7b77f0106ba2d4c9c20 | [
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from pygamehelper import *
from vec2d import *
from random import randrange
g = GameOfLife()
g.mainLoop(60)
| 27.039683 | 70 | 0.502495 |
b048467d0a750345394b6d343d01156aad3e1cef | 109 | py | Python | pylib/gna/graph/__init__.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | 5 | 2019-10-14T01:06:57.000Z | 2021-02-02T16:33:06.000Z | pylib/gna/graph/__init__.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null | pylib/gna/graph/__init__.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null | from gna.graph.walk import GraphWalker
from gna.graph.timeit import *
from gna.graph.walk_functions import *
| 27.25 | 38 | 0.816514 |
b048ccf5383075a3e3ddc09cd04494ee80c2a300 | 434 | py | Python | Recursion/Aditya_Verma/Hypothesis_Method/Print_N_to_1.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Recursion/Aditya_Verma/Hypothesis_Method/Print_N_to_1.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Recursion/Aditya_Verma/Hypothesis_Method/Print_N_to_1.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | # Forward Implementation
# Backward implementation
# - Here backward implementation, would be a bit typical to do,
# - Forward implementation makes more sense, if you think in terms of the input n
if __name__ == "__main__":
print_to_n_reverse(7)
| 25.529412 | 81 | 0.675115 |
b04a94197db758a9aeced9b7588eec2e7e3ada18 | 7,835 | py | Python | certbot_azure/azure_agw.py | loufa-io/certbot-azure | f081da34fa74c3d2fded08af2da0ca2b5380fa14 | [
"MIT"
] | null | null | null | certbot_azure/azure_agw.py | loufa-io/certbot-azure | f081da34fa74c3d2fded08af2da0ca2b5380fa14 | [
"MIT"
] | null | null | null | certbot_azure/azure_agw.py | loufa-io/certbot-azure | f081da34fa74c3d2fded08af2da0ca2b5380fa14 | [
"MIT"
] | null | null | null | """Azure App Gateway Certbot installer plugin."""
from __future__ import print_function
import os
import sys
import logging
import time
import OpenSSL
import base64
try:
from secrets import token_urlsafe
except ImportError:
from os import urandom
import zope.component
import zope.interface
from certbot import interfaces
from certbot import errors
from certbot.plugins import common
from azure.common.client_factory import get_client_from_auth_file
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from msrestazure.azure_exceptions import CloudError
from azure.identity import CredentialUnavailableError
from .cred_wrapper import CredentialWrapper
# Base URL for the Microsoft documentation links shown to the user.
MSDOCS = 'https://docs.microsoft.com/'
# How to create an Azure SDK auth (credentials JSON) file.
ACCT_URL = MSDOCS + 'python/azure/python-sdk-azure-authenticate?view=azure-python#mgmt-auth-file'
# Azure CLI installation instructions.
AZURE_CLI_URL = MSDOCS + 'cli/azure/install-azure-cli?view=azure-cli-latest'
# Example CLI command that generates a service-principal credentials file
# scoped to a single resource group (placeholders to be filled in by the user).
AZURE_CLI_COMMAND = ("az ad sp create-for-rbac"
                     " --name Certbot --sdk-auth"
                     " --scope /subscriptions/<SUBSCRIPTION_ID>/resourceGroups/<RESOURCE_GROUP_ID>"
                     " > mycredentials.json")
# Module-level logger.
logger = logging.getLogger(__name__)
| 36.957547 | 117 | 0.664199 |
b04b28603590e6dad8f700f43ec0e40f0f4392cb | 1,999 | py | Python | image/apps/Ignitions.py | AnthonyRawlinsUoM/MetricsDashboard | 37594e46b0cec340e10d3123bbaf94b277a3ce22 | [
"MIT"
] | null | null | null | image/apps/Ignitions.py | AnthonyRawlinsUoM/MetricsDashboard | 37594e46b0cec340e10d3123bbaf94b277a3ce22 | [
"MIT"
] | null | null | null | image/apps/Ignitions.py | AnthonyRawlinsUoM/MetricsDashboard | 37594e46b0cec340e10d3123bbaf94b277a3ce22 | [
"MIT"
] | null | null | null |
from pathlib import Path
from glob import glob as glob
from extractor.Ignition import Ignition
import logging
logger = logging.getLogger(__name__)
| 36.345455 | 76 | 0.477239 |
b04cbd151462272c28fb0ccf978f4c3ccbb776cd | 11,913 | py | Python | frontend/alexa/alexa.py | jjanetzki/HackHPI-2017 | 5345a4b385b92dff8b665818127e85eb1e14b31f | [
"MIT"
] | 1 | 2017-06-17T18:18:55.000Z | 2017-06-17T18:18:55.000Z | frontend/alexa/alexa.py | janetzki/Productivity-Bot | 5345a4b385b92dff8b665818127e85eb1e14b31f | [
"MIT"
] | null | null | null | frontend/alexa/alexa.py | janetzki/Productivity-Bot | 5345a4b385b92dff8b665818127e85eb1e14b31f | [
"MIT"
] | null | null | null | """
This code sample is a part of a simple demo to show beginners how to create a skill (app) for the Amazon Echo using AWS Lambda and the Alexa Skills Kit.
For the full code sample visit https://github.com/pmckinney8/Alexa_Dojo_Skill.git
"""
from __future__ import print_function
import requests
import json
alcohol_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/add"
caffeine_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/add"
profile_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/setprofile"
caffeine_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/recommendation"
alcohol_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/recommendation"
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch the incoming Alexa request by type.

    The JSON body of the request arrives in ``event``; ``context`` is the
    Lambda context object (unused). LaunchRequest / IntentRequest /
    SessionEndedRequest are routed to their handlers; any other type
    falls through and returns None.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    # Uncomment this check (with your skill's application ID) to prevent
    # someone else from configuring a skill that sends requests here:
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    session = event['session']
    request = event['request']
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    elif request_type == "IntentRequest":
        return on_intent(request, session)
    elif request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
def on_session_started(session_started_request, session):
    """Log that a new Alexa session has started (no other side effects)."""
    request_id = session_started_request['requestId']
    session_id = session['sessionId']
    print("on_session_started requestId=" + request_id
          + ", sessionId=" + session_id)
def on_launch(launch_request, session):
    """Handle a bare launch (skill opened without naming an intent).

    Both arguments are accepted for interface symmetry but unused; the
    generic welcome response is returned.
    """
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the handler registered for its name.

    Raises:
        ValueError: if the intent name is not one this skill knows.
    """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent_request['intent']['name']
    # Handlers are wrapped in lambdas so nothing runs until a name matches.
    dispatch = {
        "DrinkIntend": lambda: get_drink_response(intent_request),
        "DrinkFinishedIntend": lambda: get_finished_drink(intent_request),
        "CaffeineIntend": lambda: get_caffeine(intent_request),
        "AlcoholIntend": lambda: get_alcohol(intent_request),
        "CaffeineRecommendationIntend": lambda: get_caffeine_recommendation(),
        "AlcoholRecommendationIntend": lambda: get_alcohol_recommendation(),
        "CaffeineLevelIntend": lambda: get_caffeine_level(),
        "AlcoholLevelIntend": lambda: get_alcohol_level(),
        "SexIntend": lambda: set_sex(intent_request),
        "BodyweightIntend": lambda: set_bodyweight(intent_request),
        "AgeIntend": lambda: set_age(intent_request),
        "AMAZON.HelpIntent": lambda: get_help_response(),
        "AMAZON.CancelIntent": lambda: handle_session_end_request(),
        "AMAZON.StopIntent": lambda: handle_session_end_request(),
    }
    try:
        handler = dispatch[intent_name]
    except KeyError:
        raise ValueError("Invalid intent")
    return handler()
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not invoked when the skill itself returns should_end_session=true.
    Cleanup logic, if ever needed, belongs here.
    """
    request_id = session_ended_request['requestId']
    print("on_session_ended requestId=" + request_id +
          ", sessionId=" + session['sessionId'])
# --------------- Functions that control the skill's behavior ------------------
# --------------- Helpers that build all of the responses ----------------------
| 39.44702 | 242 | 0.697138 |
b04d338c3d1c16a12edd8387b7d2185efd9aed7b | 474 | py | Python | day1.py | kdrag0n/aoc2021 | 469bd861a7d7c0add14412a705ec4cb1e1b5a10f | [
"MIT"
] | 2 | 2021-12-04T21:15:14.000Z | 2021-12-12T09:28:28.000Z | day1.py | kdrag0n/aoc2021 | 469bd861a7d7c0add14412a705ec4cb1e1b5a10f | [
"MIT"
] | null | null | null | day1.py | kdrag0n/aoc2021 | 469bd861a7d7c0add14412a705ec4cb1e1b5a10f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys


def count_increases(values):
    """Return how many entries are strictly larger than the previous one."""
    return sum(1 for prev, cur in zip(values, values[1:]) if cur > prev)


def main():
    """Read one integer per line from the file named in argv[1] and report
    how many line-to-line increases occur (Advent of Code 2021, day 1).

    Bug fixed: the original wrapped the counting pass in ``while True:``
    with the ``break`` placed inside the ``for`` body, so the loop never
    completed a full pass over the input (and never terminated).
    """
    with open(sys.argv[1], "r") as f:
        lines = [l for l in f.read().split("\n") if l]
    values = [int(l.split()[0]) for l in lines]
    total = count_increases(values)
    # `result` / `other` are unused scaffolding; kept at 0 so the printed
    # three-line report keeps its original shape.
    result = 0
    other = 0
    print(f"Total: {total}")
    print(f"Result: {result}")
    print(f"Other: {other}")


if __name__ == "__main__":
    main()
| 12.810811 | 50 | 0.529536 |
b04e83f0c6c5bd946cc75a63519557d702719e38 | 2,142 | py | Python | pythingspeak/test_pythingspeak.py | mdauphin/pythingspeak | d5971e9347b17a14221564a368fe032ca6acaa03 | [
"MIT"
] | null | null | null | pythingspeak/test_pythingspeak.py | mdauphin/pythingspeak | d5971e9347b17a14221564a368fe032ca6acaa03 | [
"MIT"
] | null | null | null | pythingspeak/test_pythingspeak.py | mdauphin/pythingspeak | d5971e9347b17a14221564a368fe032ca6acaa03 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
import pythingspeak
import unittest
if __name__ == '__main__':
unittest.main() | 31.5 | 78 | 0.745565 |
b04f12eb656c69facb8b7d0c196d013597b90eb0 | 11,920 | py | Python | esst/utils/historygraph.py | etcher-be/esst | ac41cd0c07af8ca8532997f533756c529c9609a4 | [
"MIT"
] | 4 | 2018-06-24T14:03:44.000Z | 2019-01-21T01:20:02.000Z | esst/utils/historygraph.py | etcher-be/esst | ac41cd0c07af8ca8532997f533756c529c9609a4 | [
"MIT"
] | 106 | 2018-06-24T13:59:52.000Z | 2019-11-26T09:05:14.000Z | esst/utils/historygraph.py | theendsofinvention/esst | ac41cd0c07af8ca8532997f533756c529c9609a4 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Creates graphic of perfs
"""
import datetime
import typing
from collections import namedtuple
from tempfile import mktemp
import humanize
from esst.core import CTX
PLT = GRID_SPEC = TICKER = None
# https://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server/4935945#4935945
# noinspection SpellCheckingInspection
def _init_mpl():
    """
    This is a very stupid hack to go around Matplotlib being stupid about Tkinter.
    My linters don't like import statements mixed within the code, so this will do.

    Selecting the 'Agg' backend *before* pyplot is imported lets graphs be
    rendered on machines without a display/X server; the imported modules
    are stashed in the module-level PLT/GRID_SPEC/TICKER globals.
    """
    global PLT, GRID_SPEC, TICKER # pylint: disable=global-statement
    import matplotlib as mpl
    # Must run before the first pyplot import, otherwise the default
    # (possibly Tk-based) backend is already locked in.
    mpl.use('Agg')
    from matplotlib import pyplot as plt_
    from matplotlib import gridspec as grd_, ticker as tick_
    PLT = plt_
    GRID_SPEC = grd_
    TICKER = tick_
_init_mpl()
GraphValues = namedtuple('GraphValues', ['server_cpu_history',
'server_mem_history',
'server_bytes_sent_history',
'server_bytes_recv_history',
'dcs_cpu_history',
'dcs_mem_history',
'players_history', ])
PlotLine = namedtuple('PlotValue',
[
'values',
'label',
'style',
])
def process_values(values_to_process: GraphValues, time_delta: float) -> GraphValues:
    """Convert raw (timestamp, value) histories into plottable series.

    Each history is run through ``_process`` and then transposed with
    ``zip(*...)`` into parallel (timestamps, values) sequences.

    Args:
        values_to_process: raw histories collected from CTX
        time_delta: how far back the graph reaches
            (NOTE(review): currently unused in this function -- TODO confirm)

    Returns:
        GraphValues holding transposed series (players as a concrete tuple,
        the rest as lazy zip iterators).
    """
    lazy_fields = (
        'server_cpu_history',
        'server_mem_history',
        'server_bytes_sent_history',
        'server_bytes_recv_history',
        'dcs_cpu_history',
        'dcs_mem_history',
    )
    transposed = {
        name: zip(*_process(getattr(values_to_process, name)))
        for name in lazy_fields
    }
    players = _process(values_to_process.players_history)
    return GraphValues(players_history=tuple(zip(*players)), **transposed)
# pylint: disable=too-many-arguments,too-many-locals
def _make_history_graph(  # pylint: disable=too-many-arguments
        values_to_process,
        days=0,
        hours=0,
        minutes=0,
        show: bool = False,
        save_path=None):
    """
    Creates a graph of perfs

    Lays out three stacked subplots (server, DCS, bandwidth) sharing the
    server plot's x-axis, then either shows the figure interactively or
    writes it to a PNG file.

    Args:
        values_to_process: raw GraphValues histories to plot
        days: how many days back the graph reaches
        hours: how many hours back the graph reaches
        minutes: how many minutes back the graph reaches
        show: show and exit
        save_path: specify path to save to (default to temp path)

    Returns:
        path of the saved PNG, or None when ``show`` is true
    """
    # noinspection PyTypeChecker
    now = datetime.datetime.now().timestamp()
    time_delta = _make_delta(now, days, hours, minutes)
    values = process_values(values_to_process, time_delta)
    figure = PLT.figure(figsize=(18, 12)) # type: ignore
    # Three rows of equal height: server stats, DCS stats, bandwidth.
    grid_spec = GRID_SPEC.GridSpec(3, 1, height_ratios=[1, 1, 1]) # type: ignore
    ax_server = _plot_server(grid_spec, values, now)
    # The lower plots share the server plot's time axis so they stay aligned.
    _plot_dcs(grid_spec, values, now, share_x=ax_server)
    _plot_bandwidth(grid_spec, values, now, share_x=ax_server)
    PLT.tight_layout() # type: ignore
    figure.tight_layout()
    if show:
        # Interactive mode: display the figure, close it, return nothing.
        PLT.show() # type: ignore
        PLT.close() # type: ignore
        return None
    if not save_path:
        save_path = mktemp('.png') # nosec
    PLT.savefig(save_path) # type: ignore
    PLT.close() # type: ignore
    return save_path
# pylint: disable=too-many-arguments
def make_history_graph(callback=None, days=0, hours=0, minutes=0, show: bool = False, save_path=None):
    """Render the performance-history graph from the live CTX histories.

    Args:
        callback: optional callable invoked with the rendered graph path
        minutes: number of minutes to go back
        hours: number of hours to go back
        days: number of days to go back
        show: show and exit
        save_path: specify path to save to (default to temp path)
    """
    field_names = (
        'dcs_cpu_history',
        'dcs_mem_history',
        'server_cpu_history',
        'server_mem_history',
        'server_bytes_recv_history',
        'server_bytes_sent_history',
        'players_history',
    )
    snapshot = GraphValues(**{name: getattr(CTX, name) for name in field_names})
    graph = _make_history_graph(snapshot, days, hours, minutes, show, save_path)
    if callback:
        callback(graph)
    # A previous revision rendered the graph in a ProcessPoolExecutor and
    # attached `callback` as a future done-callback; that asynchronous path
    # was disabled in favour of the synchronous call above.
if __name__ == '__main__':
    # Debug code
    # Fabricates five hours of fake CPU/memory/bandwidth/player samples in
    # CTX, then renders them to ./test.png for a quick visual check.
    import random
    TIME_DELTA = datetime.timedelta(hours=5)
    TOTAL_SECONDS = int(TIME_DELTA.total_seconds())
    NOW = datetime.datetime.now().timestamp()
    PLAYER_COUNT = 0
    CTX.players_history.append((NOW - TOTAL_SECONDS, 0))
    SKIP = 0
    # One sample every 10 seconds, walking from the past up to "now".
    for time_stamp in range(TOTAL_SECONDS, 0, -10):
        CTX.server_mem_history.append(
            (NOW - time_stamp, random.randint(60, 70))) # nosec
        CTX.dcs_cpu_history.append((NOW - time_stamp, random.randint(20, 30))) # nosec
        CTX.dcs_mem_history.append((NOW - time_stamp, random.randint(60, 70))) # nosec
        SKIP += 1
        if SKIP > 20:
            SKIP = 0
            # Bandwidth is sampled far less often (every 21st tick).
            CTX.server_bytes_recv_history.append(
                (NOW - time_stamp, random.randint(0, 50000000))) # nosec
            CTX.server_bytes_sent_history.append(
                (NOW - time_stamp, random.randint(0, 50000000))) # nosec
        if time_stamp <= int(TOTAL_SECONDS / 2):
            # Server CPU data only exists for the second half of the window.
            CTX.server_cpu_history.append(
                (NOW - time_stamp, random.randint(20, 30))) # nosec
        if random.randint(0, 100) > 99: # nosec
            # Rarely nudge the player count up or down (never below zero).
            PLAYER_COUNT += random.choice([-1, 1]) # nosec
            if PLAYER_COUNT < 0:
                PLAYER_COUNT = 0
                continue
            CTX.players_history.append((NOW - time_stamp, PLAYER_COUNT))
    TIME_DELTA = datetime.datetime.now() - TIME_DELTA # type: ignore
    TIME_DELTA = TIME_DELTA.timestamp() # type: ignore
    make_history_graph(hours=5, save_path='./test.png')
| 32.747253 | 117 | 0.640017 |
b04f60f28cbb6155e0266d15a62d61ce814d26c3 | 1,267 | py | Python | 20.valid-parentheses.py | Qianli-Ma/LeetCode | ebda421c3d652adffca5e547a22937bf1726a532 | [
"MIT"
] | null | null | null | 20.valid-parentheses.py | Qianli-Ma/LeetCode | ebda421c3d652adffca5e547a22937bf1726a532 | [
"MIT"
] | null | null | null | 20.valid-parentheses.py | Qianli-Ma/LeetCode | ebda421c3d652adffca5e547a22937bf1726a532 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=20 lang=python3
#
# [20] Valid Parentheses
#
# https://leetcode.com/problems/valid-parentheses/description/
#
# algorithms
# Easy (36.20%)
# Total Accepted: 554.4K
# Total Submissions: 1.5M
# Testcase Example: '"()"'
#
# Given a string containing just the characters '(', ')', '{', '}', '[' and
# ']', determine if the input string is valid.
#
# An input string is valid if:
#
#
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
#
#
# Note that an empty string isalso considered valid.
#
# Example 1:
#
#
# Input: "()"
# Output: true
#
#
# Example 2:
#
#
# Input: "()[]{}"
# Output: true
#
#
# Example 3:
#
#
# Input: "(]"
# Output: false
#
#
# Example 4:
#
#
# Input: "([)]"
# Output: false
#
#
# Example 5:
#
#
# Input: "{[]}"
# Output: true
#
#
#
| 16.454545 | 75 | 0.534333 |
b0578e2fd0b0bbd54ee3add80281e9bcba12bdeb | 428 | py | Python | airypi/redis_queue.py | airypi/airypi | c7e3e781eaf2e6b3e2e87b576d5202e381544d0c | [
"Apache-2.0"
] | 3 | 2015-11-04T19:45:48.000Z | 2017-10-26T19:40:18.000Z | airypi/redis_queue.py | airypi/airypi | c7e3e781eaf2e6b3e2e87b576d5202e381544d0c | [
"Apache-2.0"
] | null | null | null | airypi/redis_queue.py | airypi/airypi | c7e3e781eaf2e6b3e2e87b576d5202e381544d0c | [
"Apache-2.0"
] | null | null | null | import redis
from flask import g, session
import device
import message_queue
import os | 22.526316 | 61 | 0.698598 |
b05916119eca4a721a156d9e476326122efd26e2 | 4,956 | py | Python | rnnApp.py | RiboswitchClassifier/RiboswitchClassification | 4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7 | [
"MIT"
] | 2 | 2019-12-16T13:08:28.000Z | 2021-02-23T03:03:18.000Z | rnnApp.py | RiboswitchClassifier/RiboswitchClassification | 4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7 | [
"MIT"
] | null | null | null | rnnApp.py | RiboswitchClassifier/RiboswitchClassification | 4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7 | [
"MIT"
] | 3 | 2019-01-01T06:00:20.000Z | 2020-01-28T13:57:49.000Z | import tensorflow as tf
import theano
import pandas as pd
import numpy as np
import matplotlib
# matplotlib.use('pdf')
import matplotlib.pyplot as plt
from keras.layers import Dense, Dropout, LSTM, Embedding, Activation, Lambda, Bidirectional
from sklearn.preprocessing import OneHotEncoder
from keras.engine import Input, Model, InputSpec
from keras.preprocessing.sequence import pad_sequences
from keras.utils import plot_model
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from sklearn.utils import class_weight
from keras import backend as K
from keras.preprocessing import sequence
from keras.models import model_from_json
from keras.utils import to_categorical
from sklearn.utils import shuffle
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.preprocessing import label_binarize
import os
import pydot
from keras.models import load_model
import multiclassROC
import graphviz
import functools
import preprocess
# Hyperparameters and Parameters
EPOCHS = 25 # an arbitrary cutoff, generally defined as "one pass over the entire dataset", used to separate training into distinct phases, which is useful for logging and periodic evaluation.
BATCH_SIZE = 128 # a set of N samples. The samples in a batch are processed` independently, in parallel. If training, a batch results in only one update to the model.
ALLOWED_ALPHABETS = 'ATGCN' # Allowed Charecters
INPUT_DIM = len(ALLOWED_ALPHABETS) # a vocabulary of 5 words in case of genome sequence 'ATGCN'
CLASSES = 32 # Number of Classes to Classify -> Change this to 16 when needed
OUTPUT_DIM = 50 # Embedding output of Layer 1
RNN_HIDDEN_DIM = 62 # Hidden Layers
DROPOUT_RATIO = 0.2 # proportion of neurones not used for training
MAXLEN = 250 # cuts text after number of these characters in pad_sequences
VALIDATION_SPLIT = 0.1
# Create Directory for Checkpoints
checkpoint_dir ='epoch_tuning/RNN/32_checkpoints'
os.path.exists(checkpoint_dir)
# Path to save and load Model
model_file_h5 = "models/rnn_32_model.h5"
# Path to Dataset
input_file_train = 'processed_datasets/final_32train.csv'
input_file_test = 'processed_datasets/final_32test.csv'
# Create the RNN
# Train RNN
# Classification Report
# Predict Classes, Probabilities, Call AucRoc Function
if __name__ == '__main__':
# Load Training Datasets
X_train, y_train = preprocess.load_data(input_file_train,True)
# Create Model Structure
model = create_lstm(len(X_train[0]))
model.summary()
# Load Test Datasets
X_test, y_test = preprocess.load_data(input_file_test, False)
# Train Model and Save it
model = train_model_and_save(X_train, y_train, model)
# Generate Auc and Roc Curve
generate_auc_roc(X_test, y_test)
| 43.858407 | 193 | 0.7841 |
b05b358493a6597bac995a34db28dd63e04524e6 | 72 | py | Python | geetiles/config/prod.py | icpac-igad/gee-tiles | 713a58e00b4377dd54aeaa77416ad7fe7b2c9206 | [
"MIT"
] | 1 | 2020-09-28T12:23:25.000Z | 2020-09-28T12:23:25.000Z | geetiles/config/prod.py | icpac-igad/gee-tiles | 713a58e00b4377dd54aeaa77416ad7fe7b2c9206 | [
"MIT"
] | 6 | 2019-08-28T17:17:25.000Z | 2021-10-13T07:19:14.000Z | geetiles/config/prod.py | icpac-igad/gee-tiles | 713a58e00b4377dd54aeaa77416ad7fe7b2c9206 | [
"MIT"
] | 5 | 2019-11-15T10:37:56.000Z | 2021-07-15T08:07:27.000Z | """-"""
SETTINGS = {
'logging': {
'level': 'DEBUG'
}
}
| 9 | 24 | 0.347222 |
b05bf40e3728937480f8f42cb9c975d60036475f | 6,911 | py | Python | neptune-python-utils/neptune_python_utils/glue_gremlin_client.py | Alfian878787/amazon-neptune-tools | a447da238e99612a290babc66878fe772727a19e | [
"Apache-2.0"
] | null | null | null | neptune-python-utils/neptune_python_utils/glue_gremlin_client.py | Alfian878787/amazon-neptune-tools | a447da238e99612a290babc66878fe772727a19e | [
"Apache-2.0"
] | null | null | null | neptune-python-utils/neptune_python_utils/glue_gremlin_client.py | Alfian878787/amazon-neptune-tools | a447da238e99612a290babc66878fe772727a19e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys
from pyspark.sql.functions import lit
from pyspark.sql.functions import format_string
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.process.traversal import *
from neptune_python_utils.gremlin_utils import GremlinUtils
from neptune_python_utils.endpoints import Endpoints | 46.073333 | 135 | 0.570395 |
b0618e2deaae21564649c946c7681a44ee75680f | 2,613 | py | Python | backend/app/api/api_v1/router/file/excel_tool.py | PY-GZKY/fastapi-crawl-admin | 6535054994d11e3c31b4caeae65e8fa0f495d2b7 | [
"MIT"
] | 13 | 2021-07-25T15:26:04.000Z | 2022-03-02T12:12:02.000Z | backend/app/api/api_v1/router/file/excel_tool.py | PY-GZKY/fastapi-crawl-admin | 6535054994d11e3c31b4caeae65e8fa0f495d2b7 | [
"MIT"
] | 1 | 2021-07-26T03:26:09.000Z | 2021-07-26T09:05:38.000Z | backend/app/api/api_v1/router/file/excel_tool.py | PY-GZKY/fastapi-crawl-admin | 6535054994d11e3c31b4caeae65e8fa0f495d2b7 | [
"MIT"
] | 3 | 2021-07-26T01:44:24.000Z | 2021-07-31T14:31:49.000Z | # -*- coding: utf-8 -*
# @Time : 2020/12/22 15:58
from fastapi import Depends
from motor.motor_asyncio import AsyncIOMotorClient
from app.api.db.mongoDB import get_database
import pandas as pd
import numpy as np
from io import BytesIO
if __name__ == '__main__':
pass | 25.871287 | 87 | 0.564485 |
b0619b37fbd880320070eeeb51552bb149486090 | 1,164 | py | Python | Lab8/1 + 2 (Simple socket server)/simple_client.py | marianfx/python-labs | 7066db410ad19cababb7b66745641e65a28ccd98 | [
"MIT"
] | null | null | null | Lab8/1 + 2 (Simple socket server)/simple_client.py | marianfx/python-labs | 7066db410ad19cababb7b66745641e65a28ccd98 | [
"MIT"
] | null | null | null | Lab8/1 + 2 (Simple socket server)/simple_client.py | marianfx/python-labs | 7066db410ad19cababb7b66745641e65a28ccd98 | [
"MIT"
] | null | null | null | """Simple socket client for the simple socket client."""
import sys
import socket
import time
SOCKET_ADDRESS = "127.0.0.1"
SOCKET_PORT = 6996
def build_client_tcp(address: str, port: int):
"""Builds the TCP client."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((address, port))
time.sleep(1)
sock.close()
except:
print("Cannot connect to the target server.")
def build_client_udp(address: str, port: int, message: str):
"""Builds the UDP client."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message.encode(), (address, port))
if __name__ == "__main__":
if len(sys.argv) < 5:
print("You must give as args the mode, server address, the port and the message to send.")
exit()
MODE = sys.argv[1]
SOCKET_ADDRESS = sys.argv[2]
SOCKET_PORT = int(sys.argv[3])
MESSAGE = sys.argv[4]
if MODE == "TCP":
build_client_tcp(SOCKET_ADDRESS, SOCKET_PORT)
elif MODE == "UDP":
build_client_udp(SOCKET_ADDRESS, SOCKET_PORT, MESSAGE)
else:
print("Unable to determine what you want.")
| 28.390244 | 98 | 0.649485 |
b062b0f29115369104d664570dbb03f1de934fe3 | 2,689 | py | Python | 009/app.py | ilos-vigil/random-script | bf8d45196d4faa6912dc0469a86b8370f43ce7ac | [
"MIT"
] | null | null | null | 009/app.py | ilos-vigil/random-script | bf8d45196d4faa6912dc0469a86b8370f43ce7ac | [
"MIT"
] | null | null | null | 009/app.py | ilos-vigil/random-script | bf8d45196d4faa6912dc0469a86b8370f43ce7ac | [
"MIT"
] | null | null | null | import bs4
import nltk
import json
import re
import requests
with open('./acronym_abbreviation_id.json', 'r') as f:
data = f.read()
list_acronym_abbreviation = json.loads(data)
from_wikipedia = False
if from_wikipedia:
# Take text with Indonesian language from Wikipedia randomly
html = requests.get('https://id.wikipedia.org/wiki/Istimewa:Halaman_sembarang').text
soup = bs4.BeautifulSoup(html, 'html.parser')
for p in soup.find('div', class_='mw-parser-output').find_all('p'):
text = f'{text}{p.get_text()}'
text = re.sub(r'\n', '', text)
text = re.sub(r'\[\d*\]', '', text)
else:
text = '''
Linux (atau GNU/Linux, lihat kontroversi penamaannya) adalah nama yang diberikan kepada kumpulan sistem operasi Mirip Unix yang menggunakan Kernel Linux sebagai kernelnya. Linux merupakan proyek perangkat lunak bebas dan sumber terbuka terbesar di dunia. Seperti perangkat lunak bebas dan sumber terbuka lainnya pada umumnya, kode sumber Linux dapat dimodifikasi, digunakan dan didistribusikan kembali secara bebas oleh siapa saja
'''
text = re.sub(r'\n', '', text)
print(f'Input : {text}')
# pisah berdasarkan kalimat
# step 1
boundary = ''
rule = {
r'\.': f'.',
r'\?': f'?',
'!': f'!',
';': f';',
':': f':'
}
for old, new in rule.items():
text = re.sub(old, new, text)
# step 2
for word in re.finditer(r'"(.+)"', text):
start_position, end_position = word.regs[0][0], word.regs[0][1]
quoted_sentence = text[start_position:end_position]
quoted_sentence = re.sub('', '', quoted_sentence) # remove boundary
if text[end_position] == '.': # move boundary if character after " is .
text = text[:start_position] + quoted_sentence + text[end_position:]
else:
text = text[:start_position] + quoted_sentence + '' + text[end_position:]
# step 3
for word in re.finditer(r'([\w]*)(\.|\?|!|;|:)', text): # [word][sign]
# [0] -> position start, [1] -> position for [word], [2] -> position for [sign]
# position value is adalah (start, end + 1)
word_start_position, word_end_position, boundary_position = word.regs[1][0], word.regs[2][1], word.regs[0][1]
if text[word_start_position:word_end_position] in list_acronym_abbreviation:
text = text[:word_end_position] + text[boundary_position:] # remove boundary
# step 4
for word in re.finditer(r'([\w]+) ?(!|\?)() ?[a-z]', text): #[word](optional space)[sign][](optional space)[lowercase char]
boundary_position = word.regs[2][1]
text = text[:boundary_position] + text[boundary_position:]
# step 5
sentences = text.split('')
print('Output:')
[print(s.lstrip(' ').rstrip(' ')) for s in sentences]
| 38.414286 | 430 | 0.661584 |
b062c54e4119bba9afb9e6fce3e62bb1a445400e | 2,295 | py | Python | graphs/page_rank.py | tg12/Python | 398d1dbf4b780d1725aeae9a91b4c79f4410e2f0 | [
"MIT"
] | null | null | null | graphs/page_rank.py | tg12/Python | 398d1dbf4b780d1725aeae9a91b4c79f4410e2f0 | [
"MIT"
] | null | null | null | graphs/page_rank.py | tg12/Python | 398d1dbf4b780d1725aeae9a91b4c79f4410e2f0 | [
"MIT"
] | 1 | 2020-06-26T09:46:17.000Z | 2020-06-26T09:46:17.000Z | '''THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk
# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB
# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu
# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd
# contact :- github@jamessawyer.co.uk
"""
Author: https://github.com/bhushan-borole
"""
"""
The input graph for the algorithm is:
A B C
A 0 1 1
B 0 0 1
C 1 0 0
"""
graph = [[0, 1, 1], [0, 0, 1], [1, 0, 0]]
if __name__ == "__main__":
main()
| 25.21978 | 74 | 0.616122 |
b064a795cdfc5cdd50a92817a383a97f8144e544 | 4,330 | py | Python | DeepRTS/python/game.py | cair/deep-rts | 7aa5dde0c5df10ae3a3d057e7b89641aec58e115 | [
"MIT"
] | 144 | 2018-07-13T07:47:50.000Z | 2022-03-31T06:29:50.000Z | DeepRTS/python/game.py | cair/DeepRTS | 2ea4de0993ea0ca2677fdb36a172779db4ce7868 | [
"MIT"
] | 18 | 2019-03-29T10:37:01.000Z | 2022-03-02T12:47:34.000Z | DeepRTS/python/game.py | cair/DeepRTS | 2ea4de0993ea0ca2677fdb36a172779db4ce7868 | [
"MIT"
] | 23 | 2018-11-02T18:12:51.000Z | 2022-02-15T20:32:18.000Z | from DeepRTS import Engine, Constants
from DeepRTS.python import GUI
from DeepRTS.python import Config
from DeepRTS.python import DeepRTSPlayer
import numpy as np
import random
import os
import argparse
import gym
dir_path = os.path.dirname(os.path.realpath(__file__))
| 26.564417 | 116 | 0.599769 |
b064ac81a6a14605eca93bb63e07f0834ed4309a | 1,147 | py | Python | lairgpt/utils/assets.py | lightonai/lairgpt | 7580e1339a39662b2ff636d158c36195eb7fe3fb | [
"MIT"
] | 19 | 2021-05-04T13:54:45.000Z | 2022-01-05T15:45:12.000Z | lairgpt/utils/assets.py | lightonai/lairgpt | 7580e1339a39662b2ff636d158c36195eb7fe3fb | [
"MIT"
] | null | null | null | lairgpt/utils/assets.py | lightonai/lairgpt | 7580e1339a39662b2ff636d158c36195eb7fe3fb | [
"MIT"
] | 1 | 2021-05-28T15:25:12.000Z | 2021-05-28T15:25:12.000Z | from enum import Enum
from os.path import expanduser
from lairgpt.utils.remote import local_dir
| 23.408163 | 60 | 0.558849 |
b0651029340e768b51b715881e03f9826ce6837f | 1,546 | py | Python | smart_open/__init__.py | DataTron-io/smart_open | 3565eff8f0ffe19d7fd31063753384e0084fb1e0 | [
"MIT"
] | 1 | 2020-09-28T06:47:58.000Z | 2020-09-28T06:47:58.000Z | smart_open/__init__.py | DataTron-io/smart_open | 3565eff8f0ffe19d7fd31063753384e0084fb1e0 | [
"MIT"
] | null | null | null | smart_open/__init__.py | DataTron-io/smart_open | 3565eff8f0ffe19d7fd31063753384e0084fb1e0 | [
"MIT"
] | null | null | null | import shutil
from .smart_open_lib import *
DEFAULT_CHUNKSIZE = 16*1024*1024 # 16mb
def copy_file(src, dest, close_src=True, close_dest=True, make_path=False):
"""
Copies file from src to dest. Supports s3 and webhdfs (does not include kerberos support)
If src does not exist, a FileNotFoundError is raised.
:param src: file-like object or path
:param dest: file-like object or path
:param close_src: boolean (optional). if True, src file is closed after use.
:param close_dest: boolean (optional). if True, dest file is closed after use.
:param make_path: str (optional, default False). if True, destination parent directories are created if missing. Only if path is local
"""
logging.info("Copy file from {} to {}".format(src, dest))
if make_path:
dir_path, _ = os.path.split(dest)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
in_file = smart_open(src, 'rb')
out_file = smart_open(dest, 'wb')
try:
shutil.copyfileobj(in_file, out_file, DEFAULT_CHUNKSIZE)
except NotImplementedError as e:
logging.info("Error encountered copying file. Falling back to looping over input file. {}".format(e))
for line in in_file:
out_file.write(line)
try:
out_file.flush()
except Exception as e:
logging.info("Unable to flush out_file")
if in_file and not in_file.closed and close_src:
in_file.close()
if out_file and not out_file.closed and close_dest:
out_file.close()
| 34.355556 | 138 | 0.679172 |
b068470f8ca662453890dee9ded5d2a25fb6fcdd | 4,706 | py | Python | guacozy_server/backend/api/utils.py | yinm8315/guacozy-django-react | 99a8270cb660052d3b4868b7959a5750968d9cc3 | [
"MIT"
] | 121 | 2019-10-28T09:23:05.000Z | 2022-03-19T00:30:36.000Z | guacozy_server/backend/api/utils.py | peppelinux/guacozy | ff4ca3fae8b9a5cb379a7a73d39f0d0ea8b6521c | [
"MIT"
] | 43 | 2019-10-28T09:22:59.000Z | 2022-03-18T23:01:25.000Z | guacozy_server/backend/api/utils.py | peppelinux/guacozy | ff4ca3fae8b9a5cb379a7a73d39f0d0ea8b6521c | [
"MIT"
] | 44 | 2019-11-05T01:58:05.000Z | 2022-03-30T08:05:18.000Z | import rules
from backend.models import Folder
def add_folder_to_tree_dictionary(folder, resulting_set, include_ancestors=False):
"""
Adds folder, folder's ancestors and folder's descendants
Ancestors are needed to build the traverse path in tree view
Descendants are needed because user has permission to see them
:type folder: Folder
:type resulting_set: set
:type include_ancestors: bool}
"""
# Include all ancestors, which we get from django-mptt's get_ancestors()
# it's a "cheap" query
if include_ancestors and folder.parent is not None:
for ancestor in folder.parent.get_ancestors(ascending=False, include_self=True):
resulting_set.add(ancestor)
# add this folder
resulting_set.add(folder)
# add all foldres children
for child in folder.children.all():
add_folder_to_tree_dictionary(child, resulting_set, include_ancestors=False)
def check_folder_permissions(folder, resulting_set, user, require_view_permission=False):
"""
Recursively check folders and adds it to resulting_set if user has direct permission on folder
If require_view_permission is set to True, it returns only folders with direct permission and all child folders
If require_view_permission is set to True, it also returns all ancestor folders
:type folder: backend.Folder
:type user: users.User
:type resulting_set: set
:type require_view_permission: bool
"""
if rules.test_rule('has_direct_permission', user, folder):
add_folder_to_tree_dictionary(folder, resulting_set, include_ancestors=not require_view_permission)
else:
for child in folder.children.all():
check_folder_permissions(child, resulting_set, user, require_view_permission)
def folder_to_object(folder, user, allowed_to_list=None, allowed_to_view=None, include_objects=True):
"""
Given folder converts it and it's children and objects to a tree format, which is used in API
:type folder: Folder
:type user: users.User
:type allowed_to_list: set
:type allowed_to_view: set
:type include_objects: bool
"""
if allowed_to_list is None:
allowed_to_list = user_allowed_folders_ids(user, require_view_permission=False)
if allowed_to_view is None:
allowed_to_view = user_allowed_folders_ids(user, require_view_permission=True)
result = {'id': folder.id, 'text': folder.name, 'isFolder': True}
result_children = []
# For every child check if it is included in allowed folders
# (precalculated list of folders allowed and
# their ancestors, which is needed to get to this folder
for child in folder.children.all():
if child in allowed_to_list:
result_children += [folder_to_object(
folder=child,
user=user,
allowed_to_list=allowed_to_list,
allowed_to_view=allowed_to_view,
include_objects=include_objects
)
]
# If we are asked (include_objects) and folder is in allowed_to_view list
# include all objects (currently only connections)
if include_objects and folder.id in allowed_to_view:
for connection in folder.connections.all():
connection_object = {'id': connection.id,
'text': connection.name,
'isFolder': False,
'protocol': connection.protocol,
}
result_children += [connection_object]
result['children'] = result_children
return result
def user_allowed_folders(user, require_view_permission=False):
"""
If require_view_permission is False, return list of folders user is allowed to list
If require_view_permission is True, return list of folders user is allowed to view
:type require_view_permission: bool
:type user: users.User
"""
resulting_folder = set()
# iterate over root folders
for folder in Folder.objects.all().filter(parent=None):
check_folder_permissions(folder, resulting_folder, user, require_view_permission)
return resulting_folder
def user_allowed_folders_ids(user, require_view_permission=False):
"""
If require_view_permission is False, return list of ids of folders user is allowed to list
If require_view_permission is True, return list of ids of folders user is allowed to view
:type require_view_permission: bool
:type user: users.User
"""
resulting_set = set()
for folder in user_allowed_folders(user, require_view_permission):
resulting_set.add(folder.id)
return resulting_set
| 36.765625 | 115 | 0.698683 |
b06a64034b02fc50eab6da81b27b39ddfc4affcc | 348 | py | Python | web/services/device-service/src/app.py | fhgrings/match-io | 0acb0b006ae8d8073f1d148e80275a568c2517ae | [
"MIT"
] | null | null | null | web/services/device-service/src/app.py | fhgrings/match-io | 0acb0b006ae8d8073f1d148e80275a568c2517ae | [
"MIT"
] | null | null | null | web/services/device-service/src/app.py | fhgrings/match-io | 0acb0b006ae8d8073f1d148e80275a568c2517ae | [
"MIT"
] | null | null | null | from flask import Flask
from flask_cors import CORS
from src.ext import configuration
| 19.333333 | 42 | 0.672414 |
b06a839b9e9c3f3cd1914d16be145f347a1d20cd | 11,314 | py | Python | nyc/nyc-new-cases.py | btrr/covid19-epicenters | 4134967f6dbbdeb5ad91a435dc72d905e9886fd6 | [
"MIT"
] | 1 | 2020-04-02T15:48:28.000Z | 2020-04-02T15:48:28.000Z | nyc/nyc-new-cases.py | btrr/covid19-epicenters | 4134967f6dbbdeb5ad91a435dc72d905e9886fd6 | [
"MIT"
] | null | null | null | nyc/nyc-new-cases.py | btrr/covid19-epicenters | 4134967f6dbbdeb5ad91a435dc72d905e9886fd6 | [
"MIT"
] | null | null | null | import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
dates = ['2/29/2020', '3/1/2020', '3/2/2020', '3/3/2020', '3/4/2020', '3/5/2020', '3/6/2020', '3/7/2020', '3/8/2020', '3/9/2020', '3/10/2020', '3/11/2020', '3/12/2020', '3/13/2020', '3/14/2020', '3/15/2020', '3/16/2020', '3/17/2020', '3/18/2020', '3/19/2020', '3/20/2020', '3/21/2020', '3/22/2020', '3/23/2020', '3/24/2020', '3/25/2020', '3/26/2020', '3/27/2020', '3/28/2020', '3/29/2020', '3/30/2020', '3/31/2020', '4/1/2020', '4/2/2020', '4/3/2020', '4/4/2020', '4/5/2020', '4/6/2020', '4/7/2020', '4/8/2020', '4/9/2020', '4/10/2020', '4/11/2020', '4/12/2020', '4/13/2020', '4/14/2020', '4/15/2020', '4/16/2020', '4/17/2020', '4/18/2020', '4/19/2020', '4/20/2020', '4/21/2020', '4/22/2020', '4/23/2020', '4/24/2020', '4/25/2020', '4/26/2020', '4/27/2020', '4/28/2020', '4/29/2020', '4/30/2020', '5/1/2020', '5/2/2020', '5/3/2020', '5/4/2020', '5/5/2020', '5/6/2020', '5/7/2020', '5/8/2020', '5/9/2020', '5/10/2020', '5/11/2020', '5/12/2020', '5/13/2020', '5/14/2020', '5/15/2020', '5/16/2020', '5/17/2020', '5/18/2020', '5/19/2020', '5/20/2020', '5/21/2020', '5/22/2020', '5/23/2020', '5/24/2020', '5/25/2020', '5/26/2020', '5/27/2020', '5/28/2020', '5/29/2020', '5/30/2020', '5/31/2020', '6/1/2020', '6/2/2020', '6/3/2020', '6/4/2020', '6/5/2020', '6/6/2020', '6/7/2020', '6/8/2020', '6/9/2020', '6/10/2020', '6/11/2020', '6/12/2020', '6/13/2020', '6/14/2020', '6/15/2020', '6/16/2020', '6/17/2020', '6/18/2020', '6/19/2020', '6/20/2020', '6/21/2020', '6/22/2020', '6/23/2020', '6/24/2020', '6/25/2020', '6/26/2020', '6/27/2020', '6/28/2020', '6/30/2020', '7/01/2020', '7/02/2020',
'7/03/2020', '7/04/2020', '7/05/2020', '7/06/2020', '7/07/2020', '7/08/2020', '7/09/2020', '7/10/2020', '7/11/2020', '7/12/2020', '7/13/2020', '7/14/2020', '7/15/2020', '7/16/2020', '7/17/2020', '7/18/2020', '7/19/2020', '7/20/2020', '7/21/2020', '7/22/2020', '7/23/2020', '7/24/2020', '7/25/2020', '7/26/2020', '7/27/2020', '7/28/2020', '7/29/2020', '7/30/2020', '7/31/2020', '8/01/2020', '8/02/2020', '8/03/2020', '8/04/2020', '8/05/2020', '8/06/2020', '8/07/2020', '8/08/2020', '8/09/2020', '8/10/2020', '8/11/2020', '8/12/2020', '8/13/2020', '8/14/2020', '8/15/2020', '8/16/2020', '8/17/2020', '8/18/2020', '8/19/2020', '8/20/2020', '8/21/2020', '8/22/2020', '8/23/2020', '8/24/2020', '8/25/2020', '8/26/2020', '8/27/2020', '8/28/2020', '8/29/2020', '8/30/2020', '8/31/2020', '9/01/2020', '9/02/2020', '9/3/2020', '9/4/2020', '9/5/2020', '9/7/2020', '9/08/2020', '9/09/2020', '9/10/2020', '9/11/2020', '9/12/2020', '9/14/2020', '9/15/2020', '9/16/2020', '9/17/2020', '9/18/2020', '9/19/2020', '9/20/2020', '9/21/2020', '9/22/2020', '9/23/2020', '9/24/2020', '9/25/2020', '9/26/2020', '9/27/2020', '9/28/2020', '9/29/2020', '9/30/2020', '10/01/2020', '10/02/2020', '10/03/2020', '10/04/2020', '10/05/2020', '10/06/2020', '10/07/2020', '10/08/2020', '10/09/2020', '10/10/2020', '10/11/2020', '10/12/2020', '10/13/2020', '10/14/2020', '10/15/2020', '10/16/2020', '10/17/2020', '10/18/2020', '10/19/2020', '10/20/2020', '10/21/2020', '10/22/2020', '10/23/2020', '10/24/2020', '10/25/2020', '10/26/2020', '10/27/2020', '10/28/2020', '10/29/2020', '10/30/2020', '10/31/2020', '11/01/2020', '11/02/2020', '11/03/2020', '11/04/2020', '11/05/2020', '11/06/2020', '11/07/2020', '11/08/2020', '11/09/2020', '11/10/2020', '11/11/2020', '11/12/2020', '11/13/2020', '11/14/2020', '11/15/2020', '11/16/2020', '11/17/2020', '11/18/2020', '11/19/2020', '11/20/2020', '11/21/2020', '11/22/2020', '11/23/2020', '11/24/2020', '11/25/2020', '11/26/2020', '11/27/2020', '11/28/2020', '11/29/2020', '11/30/2020', 
'12/01/2020', '12/02/2020', '12/03/2020', '12/04/2020', '12/05/2020', '12/06/2020', '12/07/2020', '12/08/2020', '12/09/2020', '12/10/2020', '12/11/2020', '12/12/2020', '12/13/2020', '12/14/2020', '12/15/2020', '12/16/2020', '12/17/2020', '12/18/2020', '12/19/2020', '12/20/2020', '12/21/2020', '12/22/2020', '12/23/2020', '12/24/2020', '12/25/2020', '12/26/2020', '12/27/2020', '12/28/2020', '12/29/2020', '12/30/2020', '12/31/2020', '01/01/2021', '01/02/2021', '01/03/2021', '01/04/2021', '01/05/2021', '01/06/2021', '01/07/2021', '01/08/2021', '01/09/2021', '01/10/2021', '01/11/2021', '01/12/2021', '01/13/2021', '01/14/2021', '01/15/2021', '01/16/2021', '01/17/2021', '01/18/2021', '01/19/2021', '01/20/2021', '01/21/2021', '01/22/2021', '01/23/2021', '01/24/2021', '01/25/2021', '01/26/2021', '01/27/2021', '01/28/2021', '01/29/2021', '01/30/2021', '01/31/2021', '02/01/2021', '02/02/2021', '02/03/2021', '02/04/2021', '02/05/2021', '02/06/2021', '02/07/2021', '02/08/2021', '02/09/2021', '02/10/2021', '02/11/2021', '02/12/2021', '02/13/2021', '02/14/2021', '02/15/2021', '02/16/2021', '02/17/2021', '02/18/2021', '02/19/2021', '02/20/2021', '02/21/2021', '02/22/2021', '02/23/2021', '02/24/2021', '02/25/2021', '02/26/2021', '02/27/2021', '02/28/2021', '03/01/2021', '03/02/2021', '03/03/2021', '03/04/2021', '03/05/2021', '03/06/2021', '03/07/2021', '03/08/2021', '03/09/2021', '03/10/2021', '03/11/2021', '03/12/2021', '03/13/2021', '03/14/2021', '03/15/2021', '03/16/2021', '03/17/2021', '03/18/2021', '03/19/2021', '03/20/2021', '03/24/2021', '03/25/2021', '03/26/2021', '03/27/2021', '03/28/2021', '03/29/2021', '03/30/2021', '03/31/2021', '04/01/2021', '04/02/2021', '04/03/2021', '04/04/2021', '04/05/2021', '04/06/2021', '04/07/2021', '04/08/2021', '04/09/2021', '04/10/2021', '04/11/2021', '04/12/2021', '04/13/2021', '04/14/2021', '04/15/2021', '04/16/2021', '04/17/2021', '04/18/2021', '04/19/2021', '04/20/2021', '04/21/2021', '04/22/2021', '04/23/2021', '04/24/2021', 
'04/25/2021', '04/26/2021', '04/27/2021', '04/28/2021', '04/29/2021', '04/30/2021', '05/01/2021', '05/02/2021', '05/03/2021', '05/04/2021', '05/05/2021', '05/06/2021', '05/07/2021', '05/08/2021', '05/09/2021', '05/10/2021', '05/11/2021', '05/12/2021', '05/13/2021', '05/14/2021', '05/15/2021', '05/16/2021', '05/17/2021', '05/18/2021', '05/19/2021', '05/20/2021', '05/21/2021', '05/22/2021', '05/23/2021', '05/24/2021', '05/25/2021', '05/26/2021', '05/27/2021', '05/28/2021', '05/29/2021', '05/30/2021', '05/31/2021', '06/01/2021', '06/02/2021', '06/03/2021', '06/04/2021', '06/05/2021', '06/06/2021', '06/07/2021', '06/08/2021', '06/09/2021', '06/10/2021', '06/11/2021', '06/12/2021', '06/13/2021', '06/14/2021', '06/15/2021', '06/16/2021', '06/17/2021', '06/18/2021', '06/19/2021', '06/20/2021', '06/21/2021', '06/22/2021', '06/23/2021', '06/24/2021', '06/25/2021', '06/26/2021', '06/27/2021', '06/28/2021', '06/29/2021', '06/30/2021', '07/01/2021', '07/02/2021', '07/03/2021', '07/04/2021', '07/05/2021']
# Parse the "m/d/Y" date strings into datetime.date objects for the x-axis.
x_values = [dt.datetime.strptime(d, "%m/%d/%Y").date() for d in dates]
ax = plt.gca()
# Render x tick labels as month/day.
formatter = mdates.DateFormatter("%m/%d")
ax.xaxis.set_major_formatter(formatter)
# Major x ticks: one every 21 days (every weekday qualifies as a tick day).
ax.xaxis.set_major_locator(mdates.WeekdayLocator(
    byweekday=(MO, TU, WE, TH, FR, SA, SU), interval=21))
# Minor x ticks: daily.
ax.xaxis.set_minor_locator(mdates.WeekdayLocator(
    byweekday=(MO, TU, WE, TH, FR, SA, SU)))
# y-axis shows case counts divided by 1000 with a thousands separator
# (matches the "in thousands" ylabel set further below).
ax.get_yaxis().set_major_formatter(
    ticker.FuncFormatter(lambda x, pos: format(int(x/1000), ',')))
# Key policy/event dates rendered as vertical reference lines.
# Order matters: plt.legend() later in this script maps its label list to
# these artists in creation order, so keep this list in sync with it.
_event_lines = [
    (dt.datetime(2020, 3, 18), 'orange', '--', 'schools'),        # schools closed
    (dt.datetime(2020, 3, 20), 'red', '--', 'nonessential'),      # non-essential businesses closed
    (dt.datetime(2020, 3, 22), 'black', '-', 'stay at home'),     # statewide stay-at-home order
    (dt.datetime(2020, 4, 29), 'black', '--', 'funeral'),         # massive funeral in brooklyn
    (dt.datetime(2020, 6, 8), 'green', '-', 'stay at home'),      # reopening, phase 1
    (dt.datetime(2020, 9, 21), 'red', '-', 'schools reopen'),     # schools reopen
    (dt.datetime(2020, 11, 19), 'blue', '-', 'schools close'),    # schools close again
]
for event_date, line_color, line_style, line_label in _event_lines:
    plt.axvline(event_date, color=line_color, linestyle=line_style,
                linewidth=2, label=line_label)
# new cases by day
new_cases = [0, 1, 0, 0, 0, 3, 0, 7, 9, 7, 5, 23, 47, 59, 115, 60, 485, 109, 1086, 1606, 2068, 2432, 2649, 2355, 2478, 4414, 3101, 3585, 4033, 2744, 4613, 5052, 4210, 2358, 6582, 4561, 4105, 3821, 5825, 5603, 7521, 6684, 4306, 5695, 2403, 450, 4161, 6141, 4583, 4220, 3420, 2679, 2407, 3561, 3319, 4385, 4437, 2628, 2896, 1613, 2152, 2347, 2293, 2378, 1962, 1689, 1189, 1565, 1421, 1377, 1395, 1285, 4896, 657, 887, 1087, 1555, 1183, 1377, 665, 577, 724, 466, 1111, 716, 785, 646, 525, 728, 904, 783, 855, 654, 283, 293, 683, 513, 510, 601, 389, 434, 323, 435, 394, 441, 476, 284, 443, 324, 448, 276, 358, 308,
550, 249, 331, 292, 338, 385, 321, 340, 503, 340, 362, 438, 349, 291, 209, 310, 199, 382, 313, 333, 326, 275, 269, 366, 396, 332, 333, 264, 319, 552, 98, 361, 152, 531, 94, 217, 424, 313, 314, 288, 309, 199, 192, 318, 346, 287, 403, 241, 321, 210, 272, 364, 429, 330, 224, 489, 221, 181, 261, 305, 203, 217, 284, 189, 171, 183, 236, 311, 233, 229, 225, 291, 248, 324, 222, 304, 230, 212, 196, 478, 216, 290, 420, 336, 253, 275, 327, 634, 188, 320, 284, 209, 379, 386, 401, 343, 367, 395, 486, 466, 579, 609, 530, 439, 473, 587, 421, 652, 680, 473, 352, 416, 502, 438, 555, 545, 486, 523, 390, 718, 436, 481, 968, 343, 367, 568, 561, 314, 1117, 641, 585, 447, 732, 592, 800, 1087, 1151, 646, 52, 1389, 963, 1127, 1154, 1228, 1489, 973, 1552, 1264, 1486, 1420, 1572, 1398, 1642, 1350, 1312, 1959, 1889, 1905, 1282, 2100, 2218, 2384, 2512, 2855, 2498, 2406, 2298, 2715, 2561, 2582, 2643, 3168, 2630, 2367, 3265, 2531, 2539, 3874, 2633, 2256, 2761, 2693, 3199, 3766, 3452, 3222, 2653, 2512, 4029, 3366, 3851, 4800, 5041, 2937, 2892, 3956, 3969, 5077, 5241, 4770, 5045, 4306, 5168, 4508, 4746, 6222, 5018, 4988, 4520, 4509, 3571, 4283, 5127, 4844, 5130, 4086, 3982, 3013, 4964, 4774, 5229, 4533, 3375, 3069, 2570, 2084, 2463, 4394, 3973, 4160, 3811, 2144, 3870, 3407, 3590, 3398, 2945, 2904, 1914, 5296, 3819, 3515, 2306, 2554, 2974, 2558, 3459, 3149, 3265, 3318, 2806, 2979, 2369, 3084, 3389, 3245, 3087, 1994, 3443, 2058, 2680, 3003, 2124, 2397, 3400, 2833, 2229, 1127, 820, 2142, 17319, 3147, 2857, 3078, 3387, 4477, 1518, 2734, 3319, 3241, 2543, 3017, 2563, 2778, 2229, 3166, 3027, 2774, 2213, 2431, 1749, 1522, 2120, 2648, 1750, 2220, 2190, 2012, 1452, 1973, 1625, 1412, 1358, 1249, 1256, 850, 1169, 1323, 1360, 982, 855, 684, 809, 827, 777, 878, 865, 650, 419, 564, 667, 650, 347, 772, 468, 662, 312, 502, 456, 402, 406, 316, 260, 206, 376, 298, 265, 289, 210, 137, 143, 124, 258, 274, 148, 181, 191, 172, 208, 329, 154, 139, 179, 130, 76, 173, 162, 160, 151, 137, 120, 129, 132, 136, 174, 152, 
158, 110, 92, 177, 200, 71, 153, 195]
# Chart text labels.
plt.title('Covid-19 in NYC: New Cases')
plt.xlabel('Date')
plt.ylabel('Number of New Cases (in thousands)')
# NOTE: these legend labels are positional — they map one-to-one, in creation
# order, onto the seven axvline artists drawn above; keep both lists in sync.
plt.legend(['Schools Closure', 'Non-Essential Businesses Closure', 'Statewide Stay-at-Home Order',
            'Massive Funeral Crowd in Brooklyn', 'Reopening, Phase 1', 'Schools Reopen', 'Schools Close'], loc='best')
# Plot the daily new-case series and display the figure.
plt.plot(x_values, new_cases, color='#730af2', linewidth=2)
plt.show()
| 176.78125 | 4,998 | 0.595457 |
b06d15947556e9e4b04c29a89022d993e3d2bccf | 4,357 | py | Python | src/face_utils/save_figure.py | hankyul2/FaceDA | 73006327df3668923d4206f81d4976ca1240329d | [
"Apache-2.0"
] | null | null | null | src/face_utils/save_figure.py | hankyul2/FaceDA | 73006327df3668923d4206f81d4976ca1240329d | [
"Apache-2.0"
] | null | null | null | src/face_utils/save_figure.py | hankyul2/FaceDA | 73006327df3668923d4206f81d4976ca1240329d | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import albumentations as A
from pathlib import Path
import torch
from torch import nn
from src_backup.cdan import get_model
from src.backbone.iresnet import get_arcface_backbone
| 36.008264 | 116 | 0.627037 |
b070934d7222c882ff718596c5213477b01b49fc | 2,481 | py | Python | tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py | monishshah18/pytest-splunk-addon | 1600f2c7d30ec304e9855642e63511780556b406 | [
"Apache-2.0"
] | 39 | 2020-06-09T17:37:21.000Z | 2022-02-08T01:57:35.000Z | tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py | monishshah18/pytest-splunk-addon | 1600f2c7d30ec304e9855642e63511780556b406 | [
"Apache-2.0"
] | 372 | 2020-04-15T13:55:09.000Z | 2022-03-31T17:14:56.000Z | tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py | isabella232/pytest-splunk-addon | 5e6ae2b47df7a1feb6f358bbbd1f02197b5024f6 | [
"Apache-2.0"
] | 22 | 2020-05-06T10:43:45.000Z | 2022-03-16T15:50:08.000Z | import pytest
from datetime import datetime
from freezegun import freeze_time
from pytest_splunk_addon.standard_lib.sample_generation.time_parser import (
time_parse,
)
| 37.590909 | 80 | 0.523176 |
c65e7d463bac4685e30ec3b3b04bcf2f66cd3d98 | 2,756 | py | Python | igcollect/artfiles.py | brainexe/igcollect | 12a2fa81331f305f8852b5a30c8d90d2a8895738 | [
"MIT"
] | 15 | 2016-04-13T11:13:41.000Z | 2020-12-04T17:25:43.000Z | igcollect/artfiles.py | brainexe/igcollect | 12a2fa81331f305f8852b5a30c8d90d2a8895738 | [
"MIT"
] | 10 | 2016-12-01T15:15:31.000Z | 2020-05-07T13:54:57.000Z | igcollect/artfiles.py | brainexe/igcollect | 12a2fa81331f305f8852b5a30c8d90d2a8895738 | [
"MIT"
] | 18 | 2016-03-16T11:06:10.000Z | 2022-03-14T14:56:05.000Z | #!/usr/bin/env python
"""igcollect - Artfiles Hosting Metrics
Copyright (c) 2019 InnoGames GmbH
"""
import base64
from argparse import ArgumentParser
from time import time
try:
    # Python 3: urllib was split into urllib.request / urllib.parse.
    from urllib.request import Request, urlopen
    from urllib.parse import urlencode
except ImportError:
    # Python 2 fallback: the same names live in urllib2 / urllib.
    from urllib2 import Request, urlopen
    from urllib import urlencode
# Script entry point; main() is defined elsewhere in this file.
if __name__ == '__main__':
    main()
| 30.966292 | 77 | 0.589623 |
c65ec057f48af79a642c8637764b523b537f83f6 | 5,459 | py | Python | sem/storage/corpus.py | YoannDupont/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 22 | 2016-11-13T21:08:58.000Z | 2021-04-26T07:04:54.000Z | sem/storage/corpus.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 15 | 2016-11-15T10:21:07.000Z | 2021-11-08T10:08:05.000Z | sem/storage/corpus.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 8 | 2016-11-15T10:21:41.000Z | 2022-03-04T21:28:05.000Z | # -*- coding: utf-8 -*-
"""
file: corpus.py
Description: defines the Corpus object. It is an object representation
of a CoNLL-formatted corpus.
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sem.IO import KeyReader, KeyWriter
# Mode aliases accepted for corpus processing, and their canonical names.
_train_set = {u"train", u"eval", u"evaluate", u"evaluation"}
_train = u"train"
_label_set = {u"label", u"annotate", u"annotation"}
# u-prefixed for consistency with _train (was a bare "label").
_label = u"label"
_modes = _train_set | _label_set
# Map every accepted alias to its canonical mode name.
_equivalence = {mode: _train for mode in _train_set}
_equivalence.update((mode, _label) for mode in _label_set)
class Corpus(object):
| 32.301775 | 130 | 0.634365 |
c660dc00601aa00fc2df39ad1285ba2cbf2bab57 | 3,426 | py | Python | recbole/utils/inferred_lm.py | ghazalehnt/RecBole | f1219847005e2c8d72b8c3cd5c49a138fe83276d | [
"MIT"
] | null | null | null | recbole/utils/inferred_lm.py | ghazalehnt/RecBole | f1219847005e2c8d72b8c3cd5c49a138fe83276d | [
"MIT"
] | null | null | null | recbole/utils/inferred_lm.py | ghazalehnt/RecBole | f1219847005e2c8d72b8c3cd5c49a138fe83276d | [
"MIT"
] | null | null | null | import time
import torch
from recbole.config import Config
from recbole.utils import get_model, init_seed
import gensim
import gensim.downloader as api
from recbole.data import create_dataset, data_preparation
import numpy as np
URL_FIELD = "item_url"
| 40.305882 | 140 | 0.613543 |
c660f9f806690fc5f7e2f8042a3e47405144af39 | 2,842 | py | Python | alchemist_py/parsetab.py | Kenta11/alchemist_py | 49d013dde4688f663eb2d35519347047739ecace | [
"MIT"
] | null | null | null | alchemist_py/parsetab.py | Kenta11/alchemist_py | 49d013dde4688f663eb2d35519347047739ecace | [
"MIT"
] | 1 | 2021-08-04T14:14:09.000Z | 2021-08-04T14:14:09.000Z | alchemist_py/parsetab.py | Kenta11/alchemist_py | 49d013dde4688f663eb2d35519347047739ecace | [
"MIT"
] | 1 | 2021-07-15T07:05:42.000Z | 2021-07-15T07:05:42.000Z |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'INTEGER L_BRACE L_BRACKET RESERVED R_BRACE R_BRACKET SEMICOLON STRUCT TYPE_BOOL TYPE_CSTDINT TYPE_PRIMITIVE_FLOAT TYPE_PRIMITIVE_INT TYPE_STRING TYPE_UNSIGNED VAR_NAMEMESSAGE : PARAMS\n PARAMS : PARAM\n | PARAMS PARAM\n \n PARAM : TYPE VAR_NAME SEMICOLON\n | TYPE VAR_NAME ARRAY SEMICOLON\n \n TYPE : TYPE_PRIMITIVE_INT\n | TYPE_PRIMITIVE_FLOAT\n | TYPE_CSTDINT\n | TYPE_BOOL\n | TYPE_STRING\n | TYPE_UNSIGNED TYPE_PRIMITIVE_INT\n \n ARRAY : L_BRACKET INTEGER R_BRACKET\n | ARRAY L_BRACKET INTEGER R_BRACKET\n '
_lr_action_items = {'TYPE_PRIMITIVE_INT':([0,2,3,10,11,14,17,],[5,5,-2,13,-3,-4,-5,]),'TYPE_PRIMITIVE_FLOAT':([0,2,3,11,14,17,],[6,6,-2,-3,-4,-5,]),'TYPE_CSTDINT':([0,2,3,11,14,17,],[7,7,-2,-3,-4,-5,]),'TYPE_BOOL':([0,2,3,11,14,17,],[8,8,-2,-3,-4,-5,]),'TYPE_STRING':([0,2,3,11,14,17,],[9,9,-2,-3,-4,-5,]),'TYPE_UNSIGNED':([0,2,3,11,14,17,],[10,10,-2,-3,-4,-5,]),'$end':([1,2,3,11,14,17,],[0,-1,-2,-3,-4,-5,]),'VAR_NAME':([4,5,6,7,8,9,13,],[12,-6,-7,-8,-9,-10,-11,]),'SEMICOLON':([12,15,21,22,],[14,17,-12,-13,]),'L_BRACKET':([12,15,21,22,],[16,18,-12,-13,]),'INTEGER':([16,18,],[19,20,]),'R_BRACKET':([19,20,],[21,22,]),}
# Expand the compressed action table: for each terminal _k, pair up its
# state list _v[0] with its action list _v[1], yielding
# _lr_action[state][terminal] = action.
_lr_action = {}
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_action: _lr_action[_x] = {}
        _lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'MESSAGE':([0,],[1,]),'PARAMS':([0,],[2,]),'PARAM':([0,2,],[3,11,]),'TYPE':([0,2,],[4,4,]),'ARRAY':([12,],[15,]),}
# Expand the compressed goto table the same way:
# _lr_goto[state][nonterminal] = next state.
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = {}
        _lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> MESSAGE","S'",1,None,None,None),
('MESSAGE -> PARAMS','MESSAGE',1,'p_MESSAGE','yacc.py',6),
('PARAMS -> PARAM','PARAMS',1,'p_PARAMS','yacc.py',11),
('PARAMS -> PARAMS PARAM','PARAMS',2,'p_PARAMS','yacc.py',12),
('PARAM -> TYPE VAR_NAME SEMICOLON','PARAM',3,'p_PARAM','yacc.py',21),
('PARAM -> TYPE VAR_NAME ARRAY SEMICOLON','PARAM',4,'p_PARAM','yacc.py',22),
('TYPE -> TYPE_PRIMITIVE_INT','TYPE',1,'p_TYPE','yacc.py',34),
('TYPE -> TYPE_PRIMITIVE_FLOAT','TYPE',1,'p_TYPE','yacc.py',35),
('TYPE -> TYPE_CSTDINT','TYPE',1,'p_TYPE','yacc.py',36),
('TYPE -> TYPE_BOOL','TYPE',1,'p_TYPE','yacc.py',37),
('TYPE -> TYPE_STRING','TYPE',1,'p_TYPE','yacc.py',38),
('TYPE -> TYPE_UNSIGNED TYPE_PRIMITIVE_INT','TYPE',2,'p_TYPE','yacc.py',39),
('ARRAY -> L_BRACKET INTEGER R_BRACKET','ARRAY',3,'p_ARRAY','yacc.py',67),
('ARRAY -> ARRAY L_BRACKET INTEGER R_BRACKET','ARRAY',4,'p_ARRAY','yacc.py',68),
]
| 64.590909 | 622 | 0.611189 |
c665a58b2ec63745fb6a56eded667c424d56d832 | 548 | py | Python | fisica.py | Kenedw/RSSF | b9e7f2f0c6f2304af4de645039e70800d22d2b0c | [
"MIT"
] | 1 | 2019-09-01T20:28:35.000Z | 2019-09-01T20:28:35.000Z | fisica.py | Kenedw/RSSF | b9e7f2f0c6f2304af4de645039e70800d22d2b0c | [
"MIT"
] | null | null | null | fisica.py | Kenedw/RSSF | b9e7f2f0c6f2304af4de645039e70800d22d2b0c | [
"MIT"
] | 1 | 2019-05-18T00:09:26.000Z | 2019-05-18T00:09:26.000Z | from packet import packet
# Camada Fisica
| 18.896552 | 47 | 0.669708 |
c666e9dcacd68dd1abb51bc4ffb6d2640c170719 | 11,792 | py | Python | programs/pyeos/tests/python/cryptokitties/kittyownership.py | learnforpractice/pyeos | 4f04eb982c86c1fdb413084af77c713a6fda3070 | [
"MIT"
] | 144 | 2017-10-18T16:38:51.000Z | 2022-01-09T12:43:57.000Z | programs/pyeos/tests/python/cryptokitties/kittyownership.py | openchatproject/safeos | 2c8dbf57d186696ef6cfcbb671da9705b8f3d9f7 | [
"MIT"
] | 60 | 2017-10-11T13:07:43.000Z | 2019-03-26T04:33:27.000Z | programs/pyeos/tests/python/cryptokitties/kittyownership.py | learnforpractice/pyeos | 4f04eb982c86c1fdb413084af77c713a6fda3070 | [
"MIT"
] | 38 | 2017-12-05T01:13:56.000Z | 2022-01-07T07:06:53.000Z | from backend import *
from basement import *
from pausable import *
from kittyaccesscontrol import *
from kittybase import KittyBase
from erc721 import ERC721
from erc721metadata import ERC721Metadata
# @title The facet of the CryptoKitties core contract that manages ownership, ERC-721 (draft) compliant.
# @author Axiom Zen (https://www.axiomzen.co)
# @dev Ref: https://github.com/ethereum/EIPs/issues/721
# See the KittyCore contract documentation to understand how the various contract facets are arranged.
# @dev Checks if a given address currently has transferApproval for a particular Kitty.
# @param _claimant the address we are confirming kitten is approved for.
# @param _tokenId kitten id, only valid when > 0
def _approvedFor(self, _claimant: address, _tokenId: uint256) -> bool:
    """True iff _claimant is the address approved to transfer kitty _tokenId.

    Per the comment block above, _tokenId is only valid when > 0.
    """
    approved = self.kittyIndexToApproved[_tokenId]
    return approved == _claimant
# @dev Marks an address as being approved for transferFrom(), overwriting any previous
# approval. Setting _approved to address(0) clears all transfer approval.
# NOTE: _approve() does NOT send the Approval event. This is intentional because
# _approve() and transferFrom() are used together for putting Kitties on auction, and
# there is no value in spamming the log with Approval events in that case.
# @notice Returns the number of Kitties owned by a specific address.
# @param _owner The owner address to check.
# @dev Required for ERC-721 compliance
# @notice Transfers a Kitty to another address. If transferring to a smart
# contract be VERY CAREFUL to ensure that it is aware of ERC-721 (or
# CryptoKitties specifically) or your Kitty may be lost forever. Seriously.
# @param _to The address of the recipient, can be a user or contract.
# @param _tokenId The ID of the Kitty to transfer.
# @dev Required for ERC-721 compliance.
# @notice Grant another address the right to transfer a specific Kitty via
# transferFrom(). This is the preferred flow for transferring NFTs to contracts.
# @param _to The address to be granted transfer approval. Pass address(0) to
# clear all approvals.
# @param _tokenId The ID of the Kitty that can be transferred if this call succeeds.
# @dev Required for ERC-721 compliance.
# @notice Transfer a Kitty owned by another address, for which the calling address
# has previously been granted transfer approval by the owner.
# @param _from The address that owns the Kitty to be transfered.
# @param _to The address that should take ownership of the Kitty. Can be any address,
# including the caller.
# @param _tokenId The ID of the Kitty to be transferred.
# @dev Required for ERC-721 compliance.
# @notice Returns the total number of Kitties currently in existence.
# @dev Required for ERC-721 compliance.
def totalSupply(self) -> uint:
    """Return the total number of Kitties currently in existence (ERC-721).

    NOTE(review): the count is kitties.length - 1 — index 0 presumably holds
    a placeholder entry; confirm against KittyBase.
    """
    kitty_count = self.kitties.length
    return kitty_count - 1
# @notice Returns the address currently assigned ownership of a given Kitty.
# @dev Required for ERC-721 compliance.
# @notice Returns a list of all Kitty IDs assigned to an address.
# @param _owner The owner whose Kitties we are interested in.
# @dev This method MUST NEVER be called by smart contract code. First, it's fairly
# expensive (it walks the entire Kitty array looking for cats belonging to owner),
# but it also returns a dynamic array, which is only supported for web3 calls, and
# not contract-to-contract calls.
# @dev Adapted from memcpy() by @arachnid (Nick Johnson <arachnid@notdot.net>)
# This method is licenced under the Apache License.
# Ref: https://github.com/Arachnid/solidity-stringutils/blob/2f6ca9accb48ae14c66f1437ec50ed19a0616f78/strings.sol
'''
def _memcpy(uint _dest, uint _src, uint _len) private view {
# Copy word-length chunks while possible
for(; _len >= 32; _len -= 32) {
assembly {
mstore(_dest, mload(_src))
}
_dest += 32;
_src += 32;
}
# Copy remaining bytes
uint256 mask = 256 ** (32 - _len) - 1;
assembly {
let srcpart := and(mload(_src), not(mask))
let destpart := and(mload(_dest), mask)
mstore(_dest, or(destpart, srcpart))
}
}
'''
# @dev Adapted from toString(slice) by @arachnid (Nick Johnson <arachnid@notdot.net>)
# This method is licenced under the Apache License.
# Ref: https://github.com/Arachnid/solidity-stringutils/blob/2f6ca9accb48ae14c66f1437ec50ed19a0616f78/strings.sol
#FIXME
'''
def _toString(bytes32[4] _rawBytes, uint256 _stringLength) private view returns (string) {
var outputString = new string(_stringLength);
uint256 outputPtr;
uint256 bytesPtr;
assembly {
outputPtr := add(outputString, 32)
bytesPtr := _rawBytes
}
_memcpy(outputPtr, bytesPtr, _stringLength);
return outputString;
'''
# @notice Returns a URI pointing to a metadata package for this token conforming to
# ERC-721 (https://github.com/ethereum/EIPs/issues/721)
# @param _tokenId The ID number of the Kitty whose metadata should be returned.
| 45.180077 | 118 | 0.672914 |
c6690d881a99354cf92a13a7b705df947e112eb1 | 5,009 | py | Python | menu.py | kokohi28/stock-prediction | 82d18cbb6366d522a01252e0cdc6eafa9fffea6d | [
"MIT"
] | 11 | 2020-06-15T12:38:57.000Z | 2021-12-08T13:34:28.000Z | menu.py | kokohi28/stock-prediction | 82d18cbb6366d522a01252e0cdc6eafa9fffea6d | [
"MIT"
] | null | null | null | menu.py | kokohi28/stock-prediction | 82d18cbb6366d522a01252e0cdc6eafa9fffea6d | [
"MIT"
] | 5 | 2020-12-17T16:58:36.000Z | 2022-02-08T09:29:28.000Z | import os
import const as CONST
from datetime import datetime
# Const
MENU_ROOT = 0
MENU_SPECIFY_DATE = 1
MENU_SPECIFY_PERCENT_TRAINED = 2
currMenu = MENU_ROOT
stockList = ['AAPL', '^DJI', '^HSI', '^GSPC'] | 27.075676 | 89 | 0.502296 |
c6692746527064fc0f46c5e36e6e97f09870ae4f | 3,410 | py | Python | demo/infinity/triton_client.py | dumpmemory/transformer-deploy | 36993d8dd53c7440e49dce36c332fa4cc08cf9fb | [
"Apache-2.0"
] | 698 | 2021-11-22T17:42:40.000Z | 2022-03-31T11:16:08.000Z | demo/infinity/triton_client.py | dumpmemory/transformer-deploy | 36993d8dd53c7440e49dce36c332fa4cc08cf9fb | [
"Apache-2.0"
] | 38 | 2021-11-23T13:45:04.000Z | 2022-03-31T10:36:45.000Z | demo/infinity/triton_client.py | dumpmemory/transformer-deploy | 36993d8dd53c7440e49dce36c332fa4cc08cf9fb | [
"Apache-2.0"
] | 58 | 2021-11-24T11:46:21.000Z | 2022-03-29T08:45:16.000Z | # Copyright 2022, Lefebvre Dalloz Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tritonclient.http
from transformer_deploy.benchmarks.utils import print_timings, setup_logging, track_infer_time
if __name__ == "__main__":
    # CLI: --length picks one of two benchmark input texts (16 or 128 tokens);
    # --model picks which Triton-deployed backend to query.
    parser = argparse.ArgumentParser(
        description="require inference", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--length", required=True, help="sequence length", choices=(16, 128), type=int)
    parser.add_argument("--model", required=True, help="model type", choices=("onnx", "tensorrt"))
    args, _ = parser.parse_known_args()
    setup_logging()
    # Model naming convention on the server: transformer_<backend>_inference.
    model_name = f"transformer_{args.model}_inference"
    # Local Triton HTTP endpoint.
    url = "127.0.0.1:8000"
    model_version = "1"
    batch_size = 1
if args.length == 128:
# from https://venturebeat.com/2021/08/25/how-hugging-face-is-tackling-bias-in-nlp/, text used in the HF demo
text = """Today, Hugging Face has expanded to become a robust NLP startup,
known primarily for making open-source software such as Transformers and Datasets,
used for building NLP systems. The software Hugging Face develops can be used for
classification, question answering, translation, and many other NLP tasks, Rush said.
Hugging Face also hosts a range of pretrained NLP models, on GitHub, that practitioners can download
and apply for their problems, Rush added.""" # noqa: W291
else:
text = "This live event is great. I will sign-up for Infinity."
    # HTTP client to the Triton server; fail fast if the model isn't loaded.
    triton_client = tritonclient.http.InferenceServerClient(url=url, verbose=False)
    assert triton_client.is_model_ready(
        model_name=model_name, model_version=model_version
    ), f"model {model_name} not yet ready"
    model_metadata = triton_client.get_model_metadata(model_name=model_name, model_version=model_version)
    model_config = triton_client.get_model_config(model_name=model_name, model_version=model_version)
    # One BYTES input tensor named "TEXT" and one requested output named "output".
    query = tritonclient.http.InferInput(name="TEXT", shape=(batch_size,), datatype="BYTES")
    model_score = tritonclient.http.InferRequestedOutput(name="output", binary_data=False)
    time_buffer = list()
    # Warm-up: 10000 unmeasured inferences so the timed run excludes start-up costs.
    for _ in range(10000):
        query.set_data_from_numpy(np.asarray([text] * batch_size, dtype=object))
        _ = triton_client.infer(
            model_name=model_name, model_version=model_version, inputs=[query], outputs=[model_score]
        )
    # Measured run: 1000 inferences, each timed into time_buffer.
    for _ in range(1000):
        with track_infer_time(time_buffer):
            query.set_data_from_numpy(np.asarray([text] * batch_size, dtype=object))
            response = triton_client.infer(
                model_name=model_name, model_version=model_version, inputs=[query], outputs=[model_score]
            )
    print_timings(name="triton transformers", timings=time_buffer)
    # Print the output tensor of the last measured inference.
    print(response.as_numpy("output"))
| 46.712329 | 117 | 0.72346 |
c66969c34948d04bc70f6e069bd8dabc5e27f5b6 | 2,361 | py | Python | mf/knnbased.py | waashk/extended-pipeline | 1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7 | [
"MIT"
] | null | null | null | mf/knnbased.py | waashk/extended-pipeline | 1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7 | [
"MIT"
] | null | null | null | mf/knnbased.py | waashk/extended-pipeline | 1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7 | [
"MIT"
] | null | null | null |
import numpy as np
from tqdm import tqdm
from scipy.sparse import csr_matrix, hstack, vstack
from sklearn.neighbors import NearestNeighbors
| 23.147059 | 105 | 0.647183 |
c66bd961fbf8bcb3556ef3c4fc46854f04ab9b95 | 581 | py | Python | general-practice/Exercises solved/codingbat/Warmup2/string_match.py | lugabrielbueno/Projeto | f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0 | [
"MIT"
] | null | null | null | general-practice/Exercises solved/codingbat/Warmup2/string_match.py | lugabrielbueno/Projeto | f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0 | [
"MIT"
] | null | null | null | general-practice/Exercises solved/codingbat/Warmup2/string_match.py | lugabrielbueno/Projeto | f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0 | [
"MIT"
] | null | null | null | #Given 2 strings, a and b, return the number of the positions where they contain the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa", and "az" substrings appear in the same place in both strings.
#string_match('xxcaazz', 'xxbaaz') 3
#string_match('abc', 'abc') 2
#string_match('abc', 'axc') 0
| 34.176471 | 229 | 0.593804 |