hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1fc8f64a1c48e617dc27ddaba536434b9f8ea44b | 4,915 | py | Python | Configuration/GlobalRuns/python/reco_TLR_311X.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/GlobalRuns/python/reco_TLR_311X.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/GlobalRuns/python/reco_TLR_311X.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
| 36.407407 | 107 | 0.545677 |
1fcb25844610f792402d0768084d92368a8057d1 | 4,838 | py | Python | renderer/settings.py | 12564985/DeFMO | 8ed9c2963678e2c59c7431ec8786302eea841572 | [
"MIT"
] | 1 | 2022-03-14T12:46:38.000Z | 2022-03-14T12:46:38.000Z | renderer/settings.py | 12564985/DeFMO | 8ed9c2963678e2c59c7431ec8786302eea841572 | [
"MIT"
] | null | null | null | renderer/settings.py | 12564985/DeFMO | 8ed9c2963678e2c59c7431ec8786302eea841572 | [
"MIT"
] | null | null | null | ## TODO: insert your ShapeNetCore.v2, textures, training and testing background paths
# NOTE that HDF5 is not generated here, to convert the dataset to HDF5 use dataloaders/conversion.py
# Root of all source datasets (ShapeNet models, textures, background videos).
g_datasets_path = '/mnt/lascar/rozumden/dataset'
g_shapenet_path = g_datasets_path + '/ShapeNetv2/ShapeNetCore.v2'
g_textures_path = g_datasets_path + '/ShapeNetv2/textures'
g_train_backgrounds_path = g_datasets_path + '/vot/'
g_test_backgrounds_path = g_datasets_path + '/sports1m/seq/'
## TODO: insert path to save the generated dataset
# BUGFIX: the path was missing its leading '/', which silently made it
# relative to the current working directory (every sibling path above is
# absolute under /mnt).
g_generated_dataset_path = '/mnt/lascar/rozumden/dataset/ShapeNetv2'
## TODO: insert your blender-2.79b path
g_blender_excutable_path = '/home.stud/rozumden/src/blender-2.79b-linux-glibc219-x86_64/blender'
# Blender camera view-point files (a set literal: iteration order is not
# guaranteed).
g_view_point_file = {
    'view_points/chair.txt',
    'view_points/bottle.txt',
    'view_points/diningtable.txt',
    'view_points/sofa.txt',
    'view_points/bed.txt',
}
# ShapeNet categories rendered for the training split, in this exact order.
g_render_objs_train = [
    'table', 'jar', 'skateboard', 'bottle', 'tower', 'chair', 'bookshelf',
    'camera', 'laptop', 'basket', 'sofa', 'knife', 'can', 'rifle', 'train',
    'lamp', 'trash bin', 'mailbox', 'watercraft', 'motorbike', 'dishwasher',
    'bench', 'pistol', 'rocket', 'loudspeaker', 'file cabinet', 'bag',
    'cabinet', 'bed', 'birdhouse', 'display', 'piano', 'earphone',
    'telephone', 'stove', 'microphone', 'mug', 'remote', 'bathtub', 'bowl',
    'keyboard', 'guitar', 'washer', 'faucet', 'printer', 'cap', 'clock',
    'helmet', 'flowerpot', 'microwaves',
]
# The current run renders the training categories.
g_render_objs = g_render_objs_train
# Manual train/test toggle: flip the constant below to False to generate the
# (much smaller) testing split with the test textures and test backgrounds.
if True:
    print('Rendering training dataset')
    g_number_per_category = 1000
    g_texture_path = g_textures_path+'/textures_train/'
    g_background_image_path = g_train_backgrounds_path
else:
    print('Rendering testing dataset')
    g_number_per_category = 20
    g_texture_path = g_textures_path+'/textures_test/'
    g_background_image_path = g_test_backgrounds_path
g_max_trials = 50 ## max trials per sample to generate a nice FMO (inside image, etc)
#folders to store synthetic data
g_syn_rgb_folder = g_generated_dataset_path+'/ShapeBlur'+str(g_number_per_category)+'STA/' # small textured light average-light
# folder of the first rendered category -- presumably used by the renderer as
# a working/probe directory; confirm
g_temp = g_syn_rgb_folder+g_render_objs[0]+'/'
#camera:
#enum in [QUATERNION, XYZ, XZY, YXZ, YZX, ZXY, ZYX, AXIS_ANGLE]
g_rotation_mode = 'XYZ'
#output:
# sub-frame steps rendered per blurred sample (assumed from the name; confirm)
g_fmo_steps = 24
#enum in [BW, RGB, RGBA], default BW
g_rgb_color_mode = 'RGBA'
#enum in [8, 10, 12, 16, 32], default 8
g_rgb_color_depth = '16'
# maximum channel value implied by the bit depth (2**16)
g_rgb_color_max = 2**int(g_rgb_color_depth)
g_rgb_file_format = 'PNG'
g_depth_use_overwrite = True
g_depth_use_file_extension = True
g_use_film_transparent = True
#dimension:
#engine type [CYCLES, BLENDER_RENDER]
g_engine_type = 'CYCLES'
#output image size = (g_resolution_x * resolution_percentage%, g_resolution_y * resolution_percentage%)
g_resolution_x = 640
g_resolution_y = 480
# 100/2 == 50.0, so the effective output is 320x240
g_resolution_percentage = 100/2
g_render_light = False
g_ambient_light = True
g_apply_texture = True
# quality filters: skip generated samples that are low-contrast or too small
g_skip_low_contrast = True
g_skip_small = True
g_bg_color = (0.6, 0.6, 0.6) # (1.0,1.0,1.0) # (0.5, .1, 0.6)
#performance:
g_gpu_render_enable = False
#if you are using gpu render, recommand to set hilbert spiral to 256 or 512
#default value for cpu render is fine
g_hilbert_spiral = 512
#total 55 categories
# Mapping from human-readable category name to its ShapeNetCore.v2 synset id.
# (The 'categlory' spelling is kept as-is so any external references keep
# working. Entries absent from g_render_objs_train -- e.g. 'car', 'airplane',
# 'pillow', 'bus', 'mobile phone' -- are available here but not rendered.)
g_shapenet_categlory_pair = {
    'table' : '04379243',
    'jar' : '03593526',
    'skateboard' : '04225987',
    'car' : '02958343',
    'bottle' : '02876657',
    'tower' : '04460130',
    'chair' : '03001627',
    'bookshelf' : '02871439',
    'camera' : '02942699',
    'airplane' : '02691156',
    'laptop' : '03642806',
    'basket' : '02801938',
    'sofa' : '04256520',
    'knife' : '03624134',
    'can' : '02946921',
    'rifle' : '04090263',
    'train' : '04468005',
    'pillow' : '03938244',
    'lamp' : '03636649',
    'trash bin' : '02747177',
    'mailbox' : '03710193',
    'watercraft' : '04530566',
    'motorbike' : '03790512',
    'dishwasher' : '03207941',
    'bench' : '02828884',
    'pistol' : '03948459',
    'rocket' : '04099429',
    'loudspeaker' : '03691459',
    'file cabinet' : '03337140',
    'bag' : '02773838',
    'cabinet' : '02933112',
    'bed' : '02818832',
    'birdhouse' : '02843684',
    'display' : '03211117',
    'piano' : '03928116',
    'earphone' : '03261776',
    'telephone' : '04401088',
    'stove' : '04330267',
    'microphone' : '03759954',
    'bus' : '02924116',
    'mug' : '03797390',
    'remote' : '04074963',
    'bathtub' : '02808440',
    'bowl' : '02880940',
    'keyboard' : '03085013',
    'guitar' : '03467517',
    'washer' : '04554684',
    'mobile phone' : '02992529', # present in the dict but not rendered
    'faucet' : '03325088',
    'printer' : '04004475',
    'cap' : '02954340',
    'clock' : '03046257',
    'helmet' : '03513137',
    'flowerpot' : '03991062',
    'microwaves' : '03761084'
}
# bicycle 02834778 | 35.573529 | 582 | 0.668251 |
1fcc73246e5b2e2deb6ef1a5498a653dfdea012b | 3,094 | py | Python | pynm/feature/extract/nmf.py | ohtaman/pynm | b003962201e4270d0dab681ede37f2d8edd560f2 | [
"MIT"
] | 1 | 2018-08-16T20:48:52.000Z | 2018-08-16T20:48:52.000Z | pynm/feature/extract/nmf.py | ohtaman/pynm | b003962201e4270d0dab681ede37f2d8edd560f2 | [
"MIT"
] | 5 | 2015-01-12T20:40:46.000Z | 2017-11-17T01:27:41.000Z | pynm/feature/extract/nmf.py | ohtaman/pynm | b003962201e4270d0dab681ede37f2d8edd560f2 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import numpy
import numpy.random
import numpy.linalg
from . import svd
def nmf(matrix,
        dim=None,
        distance="euclid",
        init=svd_init,
        max_iter=10000,
        threshould=0.001,
        epsilon=1e-9,
        seed=None):
    """Non-negative Matrix Factorization function

    Factorizes ``matrix`` into two matrices ``w`` and ``h`` such that
    ``w.dot(h)`` approximates ``matrix``.

    :param numpy.array matrix: Matrix to decompose
    :param int dim: inner dimension (rank) of the factorization; clipped to
                    min(matrix.shape). Defaults to the maximum possible rank.
    :param str distance: distance to minimize. choose "euclid", "kl" or "beta".
                         euclid: Euclidean distance
                         kl: Kullback-Leibler divergence
                         beta: beta divergence
                         default: "euclid"
    :param callable init: factory returning the initial ``(w, h)`` pair,
                          called as ``init(matrix, dim, seed)``
    :param int max_iter: max #iterations of the update loop
                         default: 10000
    :param float threshould: relative improvement below which the iteration is
                             regarded as converged (parameter name keeps its
                             original spelling for call compatibility)
    :param float epsilon: small constant to avoid zero division
    :param int seed: random seed forwarded to ``init``
    :return: factorized matrices w and h
    :raises ValueError: if ``distance`` is not one of the supported names
    """
    # NOTE(review): ``svd_init`` and the ``_improve_*`` update functions are
    # not defined in this module's visible code -- presumably provided
    # alongside the ``svd`` import; confirm before use.
    max_rank = min(matrix.shape)
    dim = min(dim, max_rank) if dim is not None else max_rank
    if distance == "euclid":
        _improve = _improve_euclidean_distance
    elif distance == "kl":
        _improve = _improve_kl_diveregence
    elif distance == "beta":
        _improve = _improve_beta_divergence
    else:
        # fail fast instead of a late NameError on the unbound ``_improve``
        raise ValueError('distance should be "euclid", "kl" or "beta", '
                         'not {!r}'.format(distance))
    w, h = init(matrix, dim, seed)
    wh = w.dot(h)
    prev_norm = numpy.linalg.norm(matrix - wh)
    for _ in range(max_iter):
        wh, w, h = _improve(matrix, wh, w, h, epsilon)
        norm = numpy.linalg.norm(matrix - wh)
        # relative improvement of the reconstruction error in this step
        improvement = (prev_norm - norm)/prev_norm
        if improvement < threshould:
            break
        prev_norm = norm
    return w, h
| 29.75 | 94 | 0.597931 |
1fccf8df9831cb035ab2861081b74267181cefc9 | 6,052 | py | Python | examples/demo_livepeer.py | scout-cool/Bubbletea | f0312d6f1c7fde4098d500e811f0503796973d07 | [
"Apache-2.0"
] | 10 | 2021-08-29T14:58:09.000Z | 2022-02-07T21:03:07.000Z | examples/demo_livepeer.py | scout-cool/Bubbletea | f0312d6f1c7fde4098d500e811f0503796973d07 | [
"Apache-2.0"
] | null | null | null | examples/demo_livepeer.py | scout-cool/Bubbletea | f0312d6f1c7fde4098d500e811f0503796973d07 | [
"Apache-2.0"
] | null | null | null | import datetime
import datetime
from altair.vegalite.v4.schema.core import Legend  # NOTE(review): unused in the visible code
import pandas  # NOTE(review): unused here; DataFrame is imported directly below
from pandas.core.frame import DataFrame
import streamlit as st
import time
import bubbletea
# Streamlit dashboard: Livepeer stake movement over a user-selected window.
st.header("LIVEPEER Stake Movement")
# Restore previously-selected dates from the URL query string; fall back to
# the last 7 days when a key is absent.
urlvars = bubbletea.parse_url_var([{'key':'startdate','type':'datetime'}, {'key':'enddate','type':'datetime'}])
try:
    end_date = urlvars['enddate']
except KeyError:
    end_date = datetime.date.today() - datetime.timedelta(days=0)  # days=0: today, no offset
try:
    start_date = urlvars['startdate']
except KeyError:
    start_date = end_date - datetime.timedelta(days=7)
date_range = st.date_input("Date range", (start_date, end_date))
# Require both endpoints before proceeding (the widget can yield a partial
# selection); stop the script run otherwise.
if not len(date_range) == 2:
    st.warning("*Please select a date range.*")
    st.stop()
start_date = date_range[0]
end_date = date_range[1]
# Convert the dates to UNIX timestamps (local-time midnight) for the query.
start_timestamp = int(time.mktime(start_date.timetuple()))
end_timestamp = int(time.mktime(end_date.timetuple()))
# Write the selection back into the URL so the view is shareable.
bubbletea.update_url({'startdate': start_date, 'enddate':end_date})
subgraph_url = "https://api.thegraph.com/subgraphs/name/livepeer/livepeer"
query_date_clause = "{timestamp_gte:%s,timestamp_lt:%s}" % (
start_timestamp,
end_timestamp,
)
query = """
{
bondEvents(where: %s, bypassPagination:true)
{
timestamp,
bondedAmount,
round {id},
newDelegate {id},
oldDelegate {id},
delegator {id},
},
unbondEvents(where: %s, bypassPagination:true)
{
timestamp,
amount,
withdrawRound,
round {id},
delegate {id},
delegator {id},
},
rebondEvents(where: %s, bypassPagination:true)
{
timestamp,
amount,
round {id},
delegate {id},
delegator {id},
}
}
""" % (
query_date_clause,
query_date_clause,
query_date_clause,
)
with st.spinner("Loading data from the graph"):
    df = bubbletea.beta_load_subgraph(subgraph_url, query, useBigDecimal=True)
    df_bond = df["bondEvents"]
    # normalise the column name so all three event tables share "amount"
    df_bond.rename(columns={"bondedAmount": "amount"}, inplace=True)
    df_rebond = df["rebondEvents"]
    df_unbond = df["unbondEvents"]
# Concatenate the three event tables into a single (timestamp, amount,
# round.id) frame.
i = 0
df_amount = DataFrame()
for df in [df_bond, df_rebond, df_unbond]:
    if len(df) > 0:
        # NOTE(review): `i` starts at 0 and is never None, so this branch is
        # dead code; every table goes through append() below (which still
        # works because df_amount starts empty, but the check looks like a
        # leftover and `== None` should be `is None` anyway)
        if i == None:
            df_amount = df[["timestamp", "amount", "round.id"]]
        else:
            df_amount = df_amount.append(df[["timestamp", "amount", "round.id"]])
    i += 1
if len(df_amount) == 0:
    st.write('No data vailable')
else:
    df_amount = df_amount.reset_index()
    # Daily totals of stake moved, summed over all three event types.
    df_amount_over_time = bubbletea.beta_aggregate_timeseries(
        df_amount,
        time_column="timestamp",
        interval=bubbletea.TimeseriesInterval.DAILY,
        columns=[
            bubbletea.ColumnConfig(
                name="amount",
                type=bubbletea.ColumnType.bigdecimal,
                aggregate_method=bubbletea.AggregateMethod.SUM,
                na_fill_value=0.0,
            )
        ],
    )
    df_amount_over_time.index.names = ["time"]
    st.subheader("Stake moved over time")
    st.write(df_amount_over_time)
    bubbletea.beta_plot_line(
        df_amount_over_time,
        x={
            "field": "time",
        },
        y={
            "title":"Amount",
            "data": [{"title": "Amount", "field": "amount"}],
        },
        legend="none",
    )
df_amount_over_round = bubbletea.beta_aggregate_groupby(
df_amount,
by_column="round.id",
columns=[
bubbletea.ColumnConfig(
name="amount",
type=bubbletea.ColumnType.bigdecimal,
aggregate_method=bubbletea.AggregateMethod.SUM,
na_fill_value=0.0,
)
],
)
df_amount_over_round.index.names = ["round"]
st.write(df_amount_over_round)
bubbletea.beta_plot_line(
df_amount_over_round,
title='Stake moved over rounds',
x={"field": "round", "title": "Round", "type":"ordinal"},# ['quantitative', 'ordinal', 'temporal', 'nominal']
y={
"title":"Amount",
"data": [{"title": "Amount", "field": "amount"}],
},
legend="none"
)
st.subheader("Transcoder Stake Changes")
# NOTE(review): process_transcoders() is not defined anywhere in this file's
# visible content -- as written this line raises NameError; confirm where the
# helper is supposed to come from.
df_transcoders = process_transcoders()
# Sum each transcoder's losses and gains over the selected period.
df_loss_gains = bubbletea.beta_aggregate_groupby(
    df_transcoders,
    "transcoder",
    columns=[
        bubbletea.ColumnConfig(
            name="loss",
            type=bubbletea.ColumnType.bigdecimal,
            aggregate_method=bubbletea.AggregateMethod.SUM,
            na_fill_value=0.0,
        ),
        bubbletea.ColumnConfig(
            name="gain",
            type=bubbletea.ColumnType.bigdecimal,
            aggregate_method=bubbletea.AggregateMethod.SUM,
            na_fill_value=0.0,
        ),
    ],
)
# net movement per transcoder (loss values are presumably negative; confirm)
df_loss_gains["total"] = df_loss_gains["loss"] + df_loss_gains["gain"]
st.write(df_loss_gains)
| 28.682464 | 117 | 0.594019 |
1fcde10af6e71da8c4ae91b2cecfc62ef747de93 | 956 | py | Python | tests/utils/test_match.py | jeremyschlatter/vaccine-feed-ingest | 215f6c144fe5220deaccdb5db3e96f28b7077b3f | [
"MIT"
] | null | null | null | tests/utils/test_match.py | jeremyschlatter/vaccine-feed-ingest | 215f6c144fe5220deaccdb5db3e96f28b7077b3f | [
"MIT"
] | 65 | 2021-05-04T13:05:01.000Z | 2022-03-31T10:13:49.000Z | tests/utils/test_match.py | jeremyschlatter/vaccine-feed-ingest | 215f6c144fe5220deaccdb5db3e96f28b7077b3f | [
"MIT"
] | null | null | null | from vaccine_feed_ingest.utils import match
| 36.769231 | 83 | 0.848326 |
1fce94867341b2964e24bbb0a90fa03bff2006d5 | 2,201 | py | Python | PyRods/examples/user_info.py | kaldrill/irodspython | 9a1018429acf9e86af8fb7ea6f37fb397e0010da | [
"CNRI-Python"
] | null | null | null | PyRods/examples/user_info.py | kaldrill/irodspython | 9a1018429acf9e86af8fb7ea6f37fb397e0010da | [
"CNRI-Python"
] | null | null | null | PyRods/examples/user_info.py | kaldrill/irodspython | 9a1018429acf9e86af8fb7ea6f37fb397e0010da | [
"CNRI-Python"
] | null | null | null | # Copyright (c) 2013, University of Liverpool
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author : Jerome Fuselier
#
from irods import *
if __name__ == "__main__":
    # Connect to iRODS using the client environment and log in.
    status, myEnv = getRodsEnv()
    conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
                             myEnv.rodsUserName, myEnv.rodsZone)
    status = clientLogin(conn)
    # NOTE(review): the status codes returned above are never checked --
    # acceptable for an example, but real code should verify them.
    # Get the information present in the iCAT
    print getUserInfo(conn, myEnv.rodsUserName)
    #print getUserInfo(conn, myEnv.rodsUserName, myEnv.rodsZone)
    # Get an irodsUser object, the zone is optional
    user = getUser(conn, myEnv.rodsUserName)
    #user = getUser(conn, myEnv.rodsUserName, myEnv.rodsZone)
    print "Id:", user.getId()
    print "Name:", user.getName()
    print "Type:", user.getTypeName()
    print "Zone:", user.getZone()
    print "Info:", user.getInfo()
    print "Comment:", user.getComment()
    print "Create TS:", user.getCreateTs()
    print "Modify TS:", user.getModifyTs()
    # You can modify some of the fields if you are admin
    #user.setComment("Useful Comment")
    #user.setInfo("Useful info")
    # Be careful if you remove your user from rodsadmin you will have trouble to put it back
    #user.setTypeName("rodsuser")
    # Be careful with this one as changing the zone will change the authentication
    #user.setZone("newZone")
    # You can get the groups the user belongs to. You obtain irodsGroup instances
    print "Member of :"
    for g in user.getGroups():
        print " -", g.getName()
    conn.disconnect()
1fd17f1089fdee8a486a2a65c3fb934cc9195151 | 1,072 | py | Python | sml_iris_knn_dtc.py | drishtim17/supervisedML | 3981d283a9937bfce793237c171fa95764846558 | [
"Apache-2.0"
] | null | null | null | sml_iris_knn_dtc.py | drishtim17/supervisedML | 3981d283a9937bfce793237c171fa95764846558 | [
"Apache-2.0"
] | null | null | null | sml_iris_knn_dtc.py | drishtim17/supervisedML | 3981d283a9937bfce793237c171fa95764846558 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import sklearn
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score
# load the iris dataset: features are in iris.data, labels in iris.target
iris=load_iris()
#traning flowers.features is stored in iris.data
#output accordingly is stored in iris.target
# split into train/test sets (80% train, 20% test)
train_iris,test_iris,train_target,test_target=train_test_split(iris.data,iris.target,test_size=0.2)
# k-nearest-neighbours classifier (k=3)
knnclf=KNeighborsClassifier(n_neighbors=3)
# decision-tree classifier
dsclf=tree.DecisionTreeClassifier()
# fit both models on the training split
knntrained=knnclf.fit(train_iris,train_target)
dsctrained=dsclf.fit(train_iris,train_target)
# predicted outputs on the test split
knnoutput=knntrained.predict(test_iris)
print(knnoutput)
# BUGFIX: this previously predicted with the KNN model (knntrained), so the
# decision tree's accuracy was never actually measured
dscoutput=dsctrained.predict(test_iris)
print(dscoutput)
# ground-truth labels
print(test_target)
# accuracy of each classifier on the test split
knnpct=accuracy_score(test_target,knnoutput)
print(knnpct)
dscpct=accuracy_score(test_target,dscoutput)
print(dscpct)
1fd3b3ac45b4ed570227a76c3f4f622771cac325 | 2,762 | py | Python | Python/Exercises/Humanize/humanize.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
] | 115 | 2015-03-23T13:34:42.000Z | 2022-03-21T00:27:21.000Z | Python/Exercises/Humanize/humanize.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
] | 56 | 2015-02-25T15:04:26.000Z | 2022-01-03T07:42:48.000Z | Python/Exercises/Humanize/humanize.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
] | 59 | 2015-11-26T11:44:51.000Z | 2022-03-21T00:27:22.000Z | #!/usr/bin/env python
def humanize(n, base=10, digits=1, unit=''):
    '''convert a floating point number to a human-readable format

    Parameters
    ----------
    n : float or str
        number to convert, it can be a string representation of
        a floating point number
    base : int
        base to use, either 2 or 10, default is 10
    digits : int
        decimal digits to use in format string, default is 1
    unit : str
        unit to use in format string, default is ''

    Returns
    -------
    str
        formatted string

    Raises
    ------
    ValueError
        raised when base is neither 2 nor 10

    Examples
    --------
    >>> humanize(1234)
    '1.2 K'
    >>> humanize(1234, digits=2)
    '1.23 K'
    >>> humanize(1234, base=2, digits=2)
    '1.21 K'
    >>> humanize(1234, unit='B')
    '1.2 KB'
    >>> humanize('1234.56', digits=4, unit='B')
    '1.2346 KB'
    >>> humanize(0.0123)
    '12.3 m'
    '''
    import math
    if base != 2 and base != 10:
        raise ValueError('base should be 2 or 10, not {:d}'.format(base))
    # a "thousand" is 10**3 for decimal prefixes, 2**10 (1024) for binary ones
    thousands = 3 if base == 10 else 10
    # scale prefixes indexed by the power of base**thousands
    orders = {
        -3: 'n',
        -2: 'u',
        -1: 'm',
        0: '',
        1: 'K',
        2: 'M',
        3: 'G',
        4: 'T',
        5: 'P',
    }
    fmt_str = '{{0:.{}f}} {{1:s}}{{2:s}}'.format(digits)
    number = float(n)
    if number == 0:
        # BUGFIX: math.log(0) is undefined and used to raise; zero needs no
        # scaling prefix
        return fmt_str.format(0.0, '', unit)
    # order of magnitude in units of base**thousands; negative exponents are
    # shifted down one so e.g. 0.0123 maps to the 'm' (milli) bucket
    exp = math.log(math.fabs(number), base**thousands)
    exp = int(exp - (1 if exp < 0 else 0))
    # BUGFIX: clamp to the known prefixes so extreme inputs no longer raise
    # KeyError (values beyond 'P'/'n' keep the outermost prefix)
    exp = max(min(exp, 5), -3)
    number = number/base**(exp*thousands)
    return fmt_str.format(number, orders[exp], unit)
if __name__ == '__main__':
    from argparse import ArgumentParser
    import sys
    arg_parser = ArgumentParser(description='convert numbers to '
                                'human-readable format')
    arg_parser.add_argument('n', type=float, nargs='?',
                            help='number to convert')
    arg_parser.add_argument('-d', type=int, default=1,
                            help='number of significant digits')
    arg_parser.add_argument('-b', action='store_true',
                            help='use base 2')
    arg_parser.add_argument('-u', default='', help='unit to display')
    options = arg_parser.parse_args()
    base = 2 if options.b else 10
    # BUGFIX: compare against None rather than truthiness so an explicit `0`
    # argument is formatted instead of falling through to stdin mode
    if options.n is not None:
        print('{0:s}'.format(humanize(options.n, base=base, digits=options.d,
                                      unit=options.u)))
    else:
        # filter mode: read one number per line from stdin
        for line in sys.stdin:
            # BUGFIX: the original called an undefined check_line() helper
            # (NameError at runtime); validate inline instead and skip lines
            # that do not parse as a number
            try:
                float(line)
            except ValueError:
                continue
            print('{0:s}'.format(humanize(line.strip(), base=base,
                                          digits=options.d,
                                          unit=options.u)))
1fd529b1fbfbcec29e94685aeef6fbda0d26c559 | 1,337 | py | Python | data/Latent.py | YoungjuNa-KR/Gaze_estimator_implementation | 95482db40ddef413870f51dadc907910d624ee6e | [
"MIT"
] | null | null | null | data/Latent.py | YoungjuNa-KR/Gaze_estimator_implementation | 95482db40ddef413870f51dadc907910d624ee6e | [
"MIT"
] | null | null | null | data/Latent.py | YoungjuNa-KR/Gaze_estimator_implementation | 95482db40ddef413870f51dadc907910d624ee6e | [
"MIT"
] | 1 | 2022-02-03T11:11:21.000Z | 2022-02-03T11:11:21.000Z | import os
import PIL
import torch
from glob import glob
from torch.utils.data import DataLoader
from torchvision.transforms.functional import pil_to_tensor
| 29.711111 | 68 | 0.604338 |
1fd676c1868fb5496119162edb66de118a176730 | 876 | py | Python | scripts/mklanguages.py | yasen-m/dosage | 81fe088621ad335cac2a53fcbc7b9b37f49ddce2 | [
"MIT"
] | null | null | null | scripts/mklanguages.py | yasen-m/dosage | 81fe088621ad335cac2a53fcbc7b9b37f49ddce2 | [
"MIT"
] | null | null | null | scripts/mklanguages.py | yasen-m/dosage | 81fe088621ad335cac2a53fcbc7b9b37f49ddce2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# update languages.py from pycountry
import os
import codecs
import pycountry
basepath = os.path.dirname(os.path.dirname(__file__))
def main():
    """Update language information in dosagelib/languages.py."""
    # target file lives in the dosagelib package under the repo root
    # (basepath is defined at module level)
    fn =os.path.join(basepath, 'dosagelib', 'languages.py')
    encoding = 'utf-8'
    with codecs.open(fn, 'w', encoding) as f:
        # file header: coding cookie plus a provenance note
        f.write('# -*- coding: %s -*-%s' % (encoding, os.linesep))
        f.write('# ISO 693-1 language codes from pycountry%s' % os.linesep)
        write_languages(f)
write_languages(f)
def write_languages(f):
    """Write a module-level dict literal mapping ISO 639-1 codes to names."""
    f.write("Iso2Language = {%s" % os.linesep)
    for language in pycountry.languages:
        # only languages that have a two-letter (alpha2) code are included
        if hasattr(language, 'alpha2'):
            f.write(" %r: %r,%s" % (language.alpha2, language.name, os.linesep))
    f.write("}%s" % os.linesep)
if __name__ == '__main__':
main()
| 29.2 | 83 | 0.634703 |
1fd6b807f6071d9b5d2c510c8209a51bbbc35084 | 531 | py | Python | reference/for_and_while.py | SeanSyue/TensorflowReferences | 2c93f4c770e2713ef4769f287e022d03e7097188 | [
"MIT"
] | null | null | null | reference/for_and_while.py | SeanSyue/TensorflowReferences | 2c93f4c770e2713ef4769f287e022d03e7097188 | [
"MIT"
] | null | null | null | reference/for_and_while.py | SeanSyue/TensorflowReferences | 2c93f4c770e2713ef4769f287e022d03e7097188 | [
"MIT"
] | null | null | null | import tensorflow as tf
# TensorFlow 1.x graph-mode demo: counting with a for loop vs. a graph-
# evaluated while condition.
x = tf.Variable(0, name='x')
model = tf.global_variables_initializer()
with tf.Session() as session:
    for i in range(5):
        # re-running the initializer resets the variable to its initial value
        session.run(model)
        # NOTE: this rebinds the Python name `x` to a new add op each pass;
        # the variable itself is never assigned
        x = x + 1
        print(session.run(x))
# same counter, but the loop condition is evaluated in the graph
x = tf.Variable(0., name='x')
threshold = tf.constant(5.)
model = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(model)
    # loop until the growing add-chain evaluates to >= threshold
    while session.run(tf.less(x, threshold)):
        x = x + 1
        x_value = session.run(x)
        print(x_value)
1fd6f57e7b90621a24c47afd31d7bbd91668d230 | 59 | py | Python | raising_exception_3.py | godontop/python-work | ea22e0df8b0b17605f5a434e556a388d1f75aa47 | [
"MIT"
] | null | null | null | raising_exception_3.py | godontop/python-work | ea22e0df8b0b17605f5a434e556a388d1f75aa47 | [
"MIT"
] | null | null | null | raising_exception_3.py | godontop/python-work | ea22e0df8b0b17605f5a434e556a388d1f75aa47 | [
"MIT"
] | null | null | null | try:
num = 5 / 0
except:
print("An error occured")
raise | 11.8 | 26 | 0.644068 |
1fd7ed8a83b56f175881d6f318fa389d67ee450a | 732 | py | Python | bewerte/muendlich.py | jupfi81/NotenManager | ee96a41088bb898c025aed7b3c904741cb71d004 | [
"MIT"
] | null | null | null | bewerte/muendlich.py | jupfi81/NotenManager | ee96a41088bb898c025aed7b3c904741cb71d004 | [
"MIT"
] | null | null | null | bewerte/muendlich.py | jupfi81/NotenManager | ee96a41088bb898c025aed7b3c904741cb71d004 | [
"MIT"
] | null | null | null | """Berechnet die mndliche Note"""
import csv
# Read the raw grading table; the first row is the header.
with open('bewertung.csv', encoding='utf-8', mode='r') as bewertung:
    TABELLE = []
    DATA = csv.reader(bewertung, delimiter=',')
    for row in DATA:
        TABELLE.append([element.strip() for element in row])
OUTPUT = [TABELLE[0] + ["Note"]]
del TABELLE[0]
for row in TABELLE:
    # Columns 2-5 hold the four partial scores, weighted 20/20/40/20.
    # BUGFIX: the guard must require all six columns -- the old `> 3` check
    # raised IndexError at row[4]/row[5] for rows with only 4 or 5 fields.
    # Shorter rows are now passed through unscored instead.
    if len(row) > 5:
        note = 20*float(row[2]) + 20*float(row[3]) + 40*float(row[4]) + 20*float(row[5])
        # scale the 0-100 weighted sum down to a grade in quarter steps
        note = round(note/25, 0)/4
        row = row + [note]
    OUTPUT.append(row)
# Write the table back out with the computed grade appended.
with open('note.csv', encoding='utf-8', mode='w') as safe:
    WRITER = csv.writer(safe, delimiter=',')
    for row in OUTPUT:
        WRITER.writerow(row)
| 31.826087 | 92 | 0.562842 |
1fd7f7aa485ce2ad0b848a0e2bbaa8cf36a6c24a | 410 | py | Python | python3/tests/test_edit_distance.py | qianbinbin/leetcode | 915cecab0c940cd13847683ec55b17b77eb0f39b | [
"MIT"
] | 4 | 2018-03-05T02:27:16.000Z | 2021-03-15T14:19:44.000Z | python3/tests/test_edit_distance.py | qianbinbin/leetcode | 915cecab0c940cd13847683ec55b17b77eb0f39b | [
"MIT"
] | null | null | null | python3/tests/test_edit_distance.py | qianbinbin/leetcode | 915cecab0c940cd13847683ec55b17b77eb0f39b | [
"MIT"
] | 2 | 2018-07-22T10:32:10.000Z | 2018-10-20T03:14:28.000Z | from unittest import TestCase
from leetcodepy.edit_distance import *
# shared fixture: first implementation of the edit-distance solution
solution1 = Solution1()
# "horse" -> "ros" requires 3 edits (expected Levenshtein distance)
word11 = "horse"
word12 = "ros"
expected1 = 3
# "intention" -> "execution" requires 5 edits
word21 = "intention"
word22 = "execution"
expected2 = 5
| 17.083333 | 74 | 0.731707 |
1fd8f8fea0aa37bc2adfbcbf6dda99e537d99a7f | 805 | py | Python | pageobject/commands/index.py | lukas-linhart/pageobject | 6ae83680ae62a94f93cefc394e4f3cc6999aeead | [
"MIT"
] | 1 | 2017-01-12T06:15:36.000Z | 2017-01-12T06:15:36.000Z | pageobject/commands/index.py | lukas-linhart/pageobject | 6ae83680ae62a94f93cefc394e4f3cc6999aeead | [
"MIT"
] | null | null | null | pageobject/commands/index.py | lukas-linhart/pageobject | 6ae83680ae62a94f93cefc394e4f3cc6999aeead | [
"MIT"
] | null | null | null | def index(self, value):
"""
Return index of the first child containing the specified value.
:param str value: text value to look for
:returns: index of the first child containing the specified value
:rtype: int
:raises ValueError: if the value is not found
"""
self.logger.info('getting index of text "{}" within page object list {}'.format(value, self._log_id_short))
self.logger.debug('getting index of text "{}" within page object list; {}'.format(value, self._log_id_long))
index = self.text_values.index(value)
self.logger.info('index of text "{}" within page object list {} is {}'.format(value, self._log_id_short, index))
self.logger.debug('index of text "{}" within page object is {}; {}'.format(value, index, self._log_id_long))
return index
| 47.352941 | 116 | 0.690683 |
1fda8ca8896b2d1bcde84055f16e53f955e23e9c | 2,724 | py | Python | vlsopt/data_factory/transaction_factory.py | violas-core/bvexchange | 74cf3197aad02e0f5e2dac457266d11c9c8cc746 | [
"MIT"
] | null | null | null | vlsopt/data_factory/transaction_factory.py | violas-core/bvexchange | 74cf3197aad02e0f5e2dac457266d11c9c8cc746 | [
"MIT"
] | null | null | null | vlsopt/data_factory/transaction_factory.py | violas-core/bvexchange | 74cf3197aad02e0f5e2dac457266d11c9c8cc746 | [
"MIT"
] | 1 | 2022-01-05T04:39:47.000Z | 2022-01-05T04:39:47.000Z | #!/usr/bin/python3
import operator
import sys
import json
import os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "./"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../lbdiemsdk/src"))
from diem import (
jsonrpc,
)
from factory_base import (
factory_base,
field
)
| 33.62963 | 96 | 0.551762 |
1fdabe81a3501b610902f47c9629b3212106ad89 | 3,746 | py | Python | python/tdk_fetch.py | selcukcihan/namewizard | c2aeb3fd1eb3ce839d0e3a145bdf2a6df354d568 | [
"CC0-1.0"
] | null | null | null | python/tdk_fetch.py | selcukcihan/namewizard | c2aeb3fd1eb3ce839d0e3a145bdf2a6df354d568 | [
"CC0-1.0"
] | null | null | null | python/tdk_fetch.py | selcukcihan/namewizard | c2aeb3fd1eb3ce839d0e3a145bdf2a6df354d568 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
from BeautifulSoup import BeautifulSoup
import json
import urllib
import urllib2
import re
import time
import os.path
# Python 2 scraper: collects Turkish person names from the TDK web service,
# appending results to names.txt and checkpointing progress in
# names_input.txt so an interrupted run can resume.
# Reload previously collected names so re-runs do not lose earlier work.
# Line format appears to be "<name> <is_male 0/1> <count> <extra>" -- confirm
# against the (missing) handle_span() writer.
names = {}
if os.path.exists("names.txt"):
    with open("names.txt") as f:
        for line in f.readlines():
            tokens = line.split(" ")
            names[tokens[0].decode("utf-8")] = (tokens[0].decode("utf-8"), tokens[1] == "1", int(tokens[2]), tokens[3].decode("utf-8"))
f = open("names.txt", 'a+')
print_counter = 0
searching = "aeiou"
beginning = 0
pageno = 1
page = 1
searchforindex = 0
guid = urllib.urlencode({'guid': "TDK.GTS.574eccc8396288.52796697"})
# Resume from the last checkpoint (letter index and page number), if any.
if os.path.exists('names_input.txt'):
    with open('names_input.txt') as ini:
        beginning, pageno = map(int, ini.readline().split())
try:
    for searchforindex in range(beginning, len(searching)):
        searchfor = searching[searchforindex]
        # only the first (resumed) letter starts at the saved page
        pagebegin = 1 if searchforindex > beginning else pageno
        tokenq = urllib.urlencode({'name': searchfor})
        for page in range(pagebegin, 122):
            print "fetching", page, "of", searchfor
            pageq = urllib.urlencode({'page': page})
            url = 'http://tdk.gov.tr/index.php?option=com_kisiadlari&arama=adlar&like=0&cinsi=0&turu=0&%s&%s&%s' % (guid, pageq, tokenq)
            # retry the request up to 5 times with a 5s pause between tries
            response = None
            for _i in range(5):
                try:
                    response = urllib.urlopen(url)
                    break
                except Exception as err:
                    print err
                    time.sleep(5)
            if not response:
                raise Exception("urllib.urlopen not working for " + url)
            soup = BeautifulSoup(response)
            # per the variable names: cinsiyet1 spans hold female names,
            # cinsiyet2 spans hold male names
            female_spans = soup.body.findAll('span', attrs={'id' : 'cinsiyet1'})
            male_spans = soup.body.findAll('span', attrs={'id' : 'cinsiyet2'})
            # NOTE(review): handle_span() is not defined in this file's
            # visible content -- confirm where it is supposed to come from
            handle_span(names, female_spans, False, f)
            handle_span(names, male_spans, True, f)
except Exception as e:
    # on any failure, persist the current position so the run can resume
    print e.__doc__
    print e.message
    ini = open("names_input.txt", 'w+')
    ini.write("%d %d\n" % (searchforindex, page))
    ini.close()
f.close()
| 32.293103 | 136 | 0.570208 |
1fdadaa704a4a57bab069bbf9519d57e9bc28d25 | 3,703 | py | Python | tests/test_source.py | j18ter/exchangelib | afb0df65c5533999bca92e25be4c00de5c03043c | [
"BSD-2-Clause"
] | null | null | null | tests/test_source.py | j18ter/exchangelib | afb0df65c5533999bca92e25be4c00de5c03043c | [
"BSD-2-Clause"
] | null | null | null | tests/test_source.py | j18ter/exchangelib | afb0df65c5533999bca92e25be4c00de5c03043c | [
"BSD-2-Clause"
] | null | null | null | from exchangelib.errors import (
ErrorAccessDenied,
ErrorFolderNotFound,
ErrorInvalidOperation,
ErrorItemNotFound,
ErrorNoPublicFolderReplicaAvailable,
)
from exchangelib.properties import EWSElement
from .common import EWSTest
| 34.287037 | 105 | 0.533081 |
1fdb3bda49808628500a9864a821b84e3138f89c | 735 | py | Python | {{cookiecutter.project_slug}}/app/utils/mail.py | Bexils/fastapi-project-template | 1d6937c5adce7603c77e01f8560032082392fdbd | [
"MIT"
] | 4 | 2021-04-04T23:19:06.000Z | 2021-04-10T21:32:23.000Z | {{cookiecutter.project_slug}}/app/utils/mail.py | Bexils/fastapi-project-template | 1d6937c5adce7603c77e01f8560032082392fdbd | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/app/utils/mail.py | Bexils/fastapi-project-template | 1d6937c5adce7603c77e01f8560032082392fdbd | [
"MIT"
] | null | null | null | import os
from datetime import datetime
from pathlib import Path
from pydantic import EmailStr
| 28.269231 | 64 | 0.672109 |
1fded2389baa0f710851c0214c487f38445e67b1 | 3,540 | py | Python | predict_btc_future.py | benjaminshi02003220/Bitcoin_price_prediction | f4894614bafa0a4295d08d0b8f53d314c4262724 | [
"MIT"
] | 6 | 2018-03-11T13:47:22.000Z | 2018-07-03T05:03:48.000Z | predict_btc_future.py | benjaminshi02003220/Bitcoin_price_prediction | f4894614bafa0a4295d08d0b8f53d314c4262724 | [
"MIT"
] | null | null | null | predict_btc_future.py | benjaminshi02003220/Bitcoin_price_prediction | f4894614bafa0a4295d08d0b8f53d314c4262724 | [
"MIT"
] | 4 | 2018-03-27T15:38:40.000Z | 2018-07-07T20:04:29.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 17:06:09 2018
@author: v-beshi
"""
import pyodbc
import pandas as pd
raw_data=pd.read_sql('select * from dbo.BitcoinTradeHistory',con)
raw_data['USDT_exceed']=raw_data['huobi_USDT']-raw_data['exchange_rate']
pre_price15=[]
for i in range(0,15):
pre_price15.append(0)
for i in range(15,len(raw_data)):
pre_price15.append((raw_data['ok0330'][i]-raw_data['ok0330'][i-15])/(raw_data['ok0330'][i-15]))
pre_price15=pd.Series(pre_price15,name='pre_price15')
pre_price10=[]
for i in range(0,10):
pre_price10.append(0)
for i in range(10,len(raw_data)):
pre_price10.append((raw_data['ok0330'][i]-raw_data['ok0330'][i-10])/(raw_data['ok0330'][i-10]))
pre_price10=pd.Series(pre_price10,name='pre_price10')
pre_price5=[]
for i in range(0,5):
pre_price5.append(0)
for i in range(5,len(raw_data)):
pre_price5.append((raw_data['ok0330'][i]-raw_data['ok0330'][i-5])/(raw_data['ok0330'][i-5]))
pre_price5=pd.Series(pre_price5,name='pre_price5')
next_price5=[]
for i in range(0,len(raw_data)-5):
if (raw_data['ok0330'][i+5]-raw_data['ok0330'][i])/(raw_data['ok0330'][i])>0:
next_price5.append(1)
else:
next_price5.append(0)
for i in range(0,5):
next_price5.append(0)
next_price5=pd.Series(next_price5,name='next_price5')
next_price10=[]
for i in range(0,len(raw_data)-10):
if (raw_data['ok0330'][i+10]-raw_data['ok0330'][i])/(raw_data['ok0330'][i])>0:
next_price10.append(1)
else:
next_price10.append(0)
for i in range(0,10):
next_price10.append(0)
next_price10=pd.Series(next_price10,name='next_price10')
next_price15=[]
for i in range(0,len(raw_data)-15):
if (raw_data['ok0330'][i+15]-raw_data['ok0330'][i])/(raw_data['ok0330'][i])>0:
next_price15.append(1)
else:
next_price15.append(0)
for i in range(0,15):
next_price15.append(0)
next_price15=pd.Series(next_price15,name='next_price15')
pre_bfx=[0]
for i in range(1,len(raw_data)):
pre_bfx.append((raw_data['bfx_last_price'][i]-raw_data['bfx_last_price'][i-1])/(raw_data['bfx_last_price'][i-1]))
pre_bfx=pd.Series(pre_bfx,name='pre_bfx')
pre_news10=[]
for i in range(0,10):
pre_news10.append(0)
for i in range(10,len(raw_data)):
pre_news10.append((raw_data['news_emotion'][i]-raw_data['news_emotion'][i-10])/(raw_data['news_emotion'][i-10]))
pre_news10=pd.Series(pre_news10,name='pre_news10')
raw_data['bids_wall']=raw_data['bfx_bids_wall']/100
raw_data['asks_wall']=raw_data['bfx_asks_wall']/100
raw_data['total_bids']=raw_data['bfx_total_bids']/100
raw_data['total_asks']=raw_data['bfx_total_asks']/100
raw_data['buy_volumn']=raw_data['bfx_buy_volumn']/50
raw_data['sell_volumn']=raw_data['bfx_sell_volumn']/50
raw_data=raw_data.drop(['ok0330','DateTime','ok_thisweek','huobi_USDT','exchange_rate','bfx_last_price','news_emotion','bfx_bids_wall','bfx_asks_wall','bfx_total_bids','bfx_total_asks','bfx_buy_volumn','bfx_sell_volumn'],axis=1)
agg_data=pd.concat([raw_data,pre_price15,pre_price10,pre_price5,pre_bfx,pre_news10,next_price5,next_price10,next_price15],axis=1)
agg_data=agg_data[15:len(agg_data)-15]
return(agg_data) | 38.478261 | 232 | 0.664124 |
1fe22fd049d8e5e23653953f62233abe237a47e8 | 16,692 | py | Python | bloodbank_rl/pyomo_models/stochastic_model_runner.py | joefarrington/bloodbank_rl | f285581145034b498f01c9b44f95437ceddb042a | [
"MIT"
] | null | null | null | bloodbank_rl/pyomo_models/stochastic_model_runner.py | joefarrington/bloodbank_rl | f285581145034b498f01c9b44f95437ceddb042a | [
"MIT"
] | null | null | null | bloodbank_rl/pyomo_models/stochastic_model_runner.py | joefarrington/bloodbank_rl | f285581145034b498f01c9b44f95437ceddb042a | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
from mpisppy.opt.ef import ExtensiveForm
from pathlib import Path
import os
import sys
# Add the repository root (two directories above this file's folder) to
# sys.path so the `bloodbank_rl` imports below resolve when run from anywhere.
path_root = Path(os.path.abspath(__file__)).parents[2]
sys.path.append(str(path_root))
from bloodbank_rl.environments.platelet_bankSR import PoissonDemandProviderSR
import bloodbank_rl.pyomo_models.model_constructors as pyomo_mc
| 37.679458 | 107 | 0.557453 |
1fe41f5dc40be297773f566df8109a75b70ca3b8 | 3,623 | py | Python | ch1/tictactoe.py | T0nyX1ang/Reinforcement-Learning | a86ab92ee628b95c7dbe432c079b7ce04b5e982a | [
"MIT"
] | null | null | null | ch1/tictactoe.py | T0nyX1ang/Reinforcement-Learning | a86ab92ee628b95c7dbe432c079b7ce04b5e982a | [
"MIT"
] | null | null | null | ch1/tictactoe.py | T0nyX1ang/Reinforcement-Learning | a86ab92ee628b95c7dbe432c079b7ce04b5e982a | [
"MIT"
] | null | null | null | import random
import json
# Script entry point: build the game, run an interactive match, then train
# and persist the learned state. (TTTGame is defined elsewhere in this file.)
# Indentation reconstructed — it was lost in extraction.
if __name__ == '__main__':
    tttg = TTTGame()
    tttg.combat()
    tttg.train(100000)
    tttg.dump_state()
| 27.037313 | 134 | 0.619928 |
1fe4750a23a26455a9111641d38426011cdda650 | 141 | py | Python | Chapter 03/ch3_1_38.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 03/ch3_1_38.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 03/ch3_1_38.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | str1 = ' Happy Life '
str2= ' Happy Life '
if (str1.strip()== str2.strip()):
print("Same")
else:
print("Not same")
# same | 17.625 | 34 | 0.531915 |
1fe4a5c508f25892277d20cf17891a3088bcee69 | 2,601 | py | Python | text_analytic_emotion_load_only.py | agussuarjaya/Text_Analytic_-Emotion- | 01cdf6f3661eaad2cb76111ebaee90ec50b592f0 | [
"MIT"
] | null | null | null | text_analytic_emotion_load_only.py | agussuarjaya/Text_Analytic_-Emotion- | 01cdf6f3661eaad2cb76111ebaee90ec50b592f0 | [
"MIT"
] | 1 | 2020-03-28T16:06:04.000Z | 2020-03-29T02:03:44.000Z | text_analytic_emotion_load_only.py | agussuarjaya/Text_Analytic_-Emotion- | 01cdf6f3661eaad2cb76111ebaee90ec50b592f0 | [
"MIT"
] | 2 | 2020-03-28T15:02:48.000Z | 2020-03-29T12:27:50.000Z | # -*- coding: utf-8 -*-
"""Text Analytic (Emotion) - load_only.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ec4JMQZ5zoj-PB_a0mUkJWRKotgQSd9f
"""
"""
Text Analytic (Emotion) with TensorFlow
Copyright 2020 I Made Agus Dwi Suarjaya
Gede Ocha Dipa Ananda
Ni Luh Putu Diah Putri Maheswari
Description : Try to analyze Tweets with TensorFlow and classify into 5 emotions (anger, happiness, sadness, love, fear)
Dataset source : https://raw.githubusercontent.com/meisaputri21/Indonesian-Twitter-Emotion-Dataset/master/Twitter_Emotion_Dataset.csv
"""
#Setup
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import csv
import time
import ast
import numpy as np
import pandas as pd
#--------------------------------------------------------------------------------------------------------------------------
# Paths to the previously-trained artifacts (model, text encoder, label dict).
model_path = './1585378332_model'
encoder_path = './1585378332_encoder'
dict_path = './1585378332_dict'
#--------------------------------------------------------------------------------------------------------------------------
# Load the model (Optional for Transfer Learning)
reloaded_model = tf.keras.models.load_model(model_path)
model = reloaded_model
# Load the encoder (Optional for Transfer Learning)
encoder = tfds.features.text.TokenTextEncoder.load_from_file(encoder_path)
# Load the dictionary (first line of the file is a Python dict literal
# mapping class index -> emotion label).
with open(dict_path) as dict_file:
    d = ast.literal_eval(dict_file.readline())
# Classify some tweets with model predict
tweet = []
tweet.append('Tahukah kamu, bahwa saat itu papa memejamkan matanya dan menahan gejolak dan batinnya. Bahwa papa sangat ingin mengikuti keinginanmu tapu lagi-lagi dia HARUS menjagamu?')
tweet.append('[Idm] My, masa gua tadi ketemu tmn SD yg pas SD ngejar gua dan ngasih surat tiap minggunya, asdfghjkl bgt, gk tau knp ngerasa takut gua :v hadeuh jaman SD ngerti apa coba :v')
tweet.append('Sedih bny penulisan resep yg tidak baku sdm, sdt, ruas, sejumput, secukupnya, even biji/buah termasuk tidak baku :(')
tweet.append('Paling nyampah org suka compare kan aku dgn org lain, dia dia ah aku aku ah. Tak suka boleh blah lah -__-')
tweet.append('Agak telat ramai nya ya dok...sudah paham sejak lama banget jadi geli aja baru pada ramai sekarang hehehe...')
# Print the predicted emotion label next to each tweet.
# (Iterates the tweets directly instead of range(len(...)); a dead no-op
# `predictions[0]` statement from the original was removed.)
for msg in tweet:
    predictions = model.predict(encoder.encode(msg))
    print(d[np.argmax(predictions[0])], ' <- ', msg)
| 38.820896 | 189 | 0.685506 |
1fe6e5bdf88233acf9a9c841722eff52d327f1f2 | 13,160 | py | Python | Server.py | HackintoshwithUbuntu/Python-Chat-App | d5af370e33a092c52702efed6b1074d458c593ac | [
"MIT"
] | 2 | 2021-08-30T03:19:10.000Z | 2021-09-06T21:51:02.000Z | Server.py | HackintoshwithUbuntu/Python-Chat-App | d5af370e33a092c52702efed6b1074d458c593ac | [
"MIT"
] | null | null | null | Server.py | HackintoshwithUbuntu/Python-Chat-App | d5af370e33a092c52702efed6b1074d458c593ac | [
"MIT"
] | null | null | null | # Imports
import socket # Communication
import threading # Communication with multiple users at once
import pickle # Serialising data
import hashlib # Hashing passwords
from Crypto.Cipher import AES # AES encryption algorithms
from Crypto.Random import get_random_bytes # For generating random keys and nonces
# A list of codes used in this program to prefix messages, so client knows their meaning
'''
______________________________________
| CODE | MEANING |
|____________________________________|
? | Signup |
! | Signin |
$ | Control |
@ | Direct Message |
^ | Everyone Message |
* | Request list |
+ | New user online |
- | User logged off |
= | Request pics dict |
p | New profile pic |
_____________________________________|
'''
# A dictionary storing usernames and passwords
logins = {}
# dictionary to store corresponding socket to username
record = {}
# dictionary to store username to socket
records = {}
# dictionary to store username to server key
keys = {}
# Dictionary storing profile pictures
pics = {}
# List to keep track of socket descriptors
connected_list = []
# A dictionary for working with logins (note: this is just so we can use the data in the file)
loginss = {}

# Starting the server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# NOTE(review): the helper functions referenced below (file_exists,
# on_new_client, and the send/receive/broadcast helpers described by the
# comments in the original file) are defined elsewhere in this file.
# Indentation of this module-level code was lost in extraction and has
# been reconstructed here.

# Load persisted logins, creating the file on first run; an empty/corrupt
# pickle is expected on first launch, so it is only logged.
if not file_exists("loginss"):
    file = open("loginss.txt", "w+")
    file.close()
with open('loginss.txt', 'rb') as file:
    try:
        loginss = pickle.load(file)
    except Exception:
        print("DEBUG: Failed reading file (the login file is probably empty, no need to worry)")

# Same for the persisted profile pictures.
if not file_exists("pic"):
    file = open("pic.txt", "w+")
    file.close()
with open('pic.txt', 'rb') as file:
    try:
        pics = pickle.load(file)
    except Exception:
        print("DEBUG: Failed reading file (the pic file is probably empty, no need to worry)")

# Empty host string: accept connections on all interfaces.
host = ''
port = 443
# Bind to the port
s.bind((host, port))
# Allow up to ten queued connections
s.listen(10)
# Now wait for client connection.
print("DEBUG: Started on:", (host, port))
print("DEBUG: Ready for clients")
while True:
    # Blocking call, waits to accept a connection
    conn, addr = s.accept()
    print("NETWORK: Connected to " + addr[0] + ":" + str(addr[1]))
    # Start a new thread for the new client; the main thread keeps listening.
    threading.Thread(target=on_new_client, args=(conn, addr)).start()
    print("\nDEBUG: Started new thread")
# In the rare case we are here, close down the server socket gracefully and then quit
s.close()
1fe7b45de50e9ea21f771782230c1d73959dc62a | 215 | py | Python | devmine/config/environments/production.py | sniperkit/snk.fork.devmine-core | 6ab43abd0c1041831ecb86dcba55ffd9e05ce615 | [
"BSD-3-Clause"
] | null | null | null | devmine/config/environments/production.py | sniperkit/snk.fork.devmine-core | 6ab43abd0c1041831ecb86dcba55ffd9e05ce615 | [
"BSD-3-Clause"
] | null | null | null | devmine/config/environments/production.py | sniperkit/snk.fork.devmine-core | 6ab43abd0c1041831ecb86dcba55ffd9e05ce615 | [
"BSD-3-Clause"
] | null | null | null | # server backend
server = 'cherrypy'
# debug error messages
debug = False
# auto-reload
reloader = False
# database url
db_url = 'sqlite:///devmine/db/devmine.db'
# echo database engine messages
db_echo = False
| 14.333333 | 42 | 0.730233 |
1fe923af70915246d98e2a502a9e9ce347a11d16 | 1,279 | py | Python | gen_screens.py | shurkova/currentVers | 25027f3f4faa9033b69041459f0785c1436c3f31 | [
"CECILL-B"
] | 1 | 2020-09-09T15:30:38.000Z | 2020-09-09T15:30:38.000Z | gen_screens.py | shurkova/currentVers | 25027f3f4faa9033b69041459f0785c1436c3f31 | [
"CECILL-B"
] | null | null | null | gen_screens.py | shurkova/currentVers | 25027f3f4faa9033b69041459f0785c1436c3f31 | [
"CECILL-B"
] | 11 | 2020-05-01T09:03:14.000Z | 2022-02-09T14:17:41.000Z | # generate 500 screens.
import random
objs = []
for i in range(500):
go_to = random.choice([2,3])
for j in range(go_to):
new_obj = {'name': 'non_exist', 'RBs': [], 'set': 'memory', 'analog': i}
width = round(random.random()*20)
hight = round(random.random()*10)
x = round(random.random()*300)
y = round(random.random()*800)
colour = random.choice([255, 155, 55, 100])
new_obj['RBs'].append({'pred_name': 'non_exist', 'pred_sem': [], 'higher_order': False, 'object_name': 'obj'+str(random.random()), 'object_sem': [['x_ext', 1, 'x_ext', 'nil', 'state'], ['x_ext'+str(width), 1, 'x_ext', width, 'value'], ['y_ext', 1, 'y_ext', 'nil', 'state'], ['y_ext'+str(hight), 1, 'y_ext', hight, 'value'], ['total_ext', 1, 'total_ext', 'nil', 'state'], ['total_ext'+str(width*hight), 1, 'total_ext', width*hight, 'value'], ['x', 1, 'x', 'nil', 'state'], ['x'+str(x), 1, 'x', width*hight, 'value'], ['y', 1, 'y', 'nil', 'state'], ['y'+str(x), 1, 'y', width*hight, 'value'], ['colour', 1, 'colour', 'nil', 'state'], [str(colour), 1, 'colour', colour, 'value']], 'P': 'non_exist'})
objs.append(new_obj)
write_file = open('screens.py', 'w')
write_file.write('simType=\'sim_file\' \nsymProps = ' + str(objs))
| 51.16 | 704 | 0.56294 |
1fec0bf47c009cdb0ca6fac21df153c55c6c1431 | 46,269 | py | Python | bot/utils/trackmania.py | NottCurious/TMIndiaBot | 824c171fa2f41aa21631796c384f70a34a721364 | [
"MIT"
] | 1 | 2022-02-12T16:40:17.000Z | 2022-02-12T16:40:17.000Z | bot/utils/trackmania.py | NottCurious/TMIndiaBot | 824c171fa2f41aa21631796c384f70a34a721364 | [
"MIT"
] | 78 | 2021-10-14T05:32:54.000Z | 2022-01-21T09:22:37.000Z | bot/utils/trackmania.py | NottCurious/TMIndiaBot | 824c171fa2f41aa21631796c384f70a34a721364 | [
"MIT"
] | null | null | null | import asyncio
import json
import os
import shutil
import typing
from datetime import datetime, timezone, timedelta
from matplotlib import pyplot as plt
import cv2
import country_converter as coco
import flag
import requests
import discord
from bot.api import APIClient
from bot.log import get_logger
from bot.utils.commons import Commons
from bot.utils.database import Database
from bot.utils.discord import EZEmbed
log = get_logger(__name__)
def _get_royal_data(self, raw_player_data: dict) -> str:
    """Gets the royal data of the player as a string.

    :param raw_player_data: player payload from the API; Royal stats are
        expected at raw_player_data["matchmaking"][1].
    :return: a Discord code-block string, or an apology string if the
        player has no Royal data.
    """
    log.debug("Getting Player Data")
    try:
        royal_data = raw_player_data["matchmaking"][1]
        rank = royal_data["info"]["rank"]
        wins = royal_data["info"]["progression"]
        current_div = royal_data["info"]["division"]["position"]
        if wins != 0:
            # Percentage of the way through the current division's win range.
            progression_to_next_div = (
                round(
                    (wins - royal_data["info"]["division"]["minwins"])
                    / (
                        royal_data["info"]["division"]["maxwins"]
                        - royal_data["info"]["division"]["minwins"]
                        + 1
                    ),
                    4,
                )
                * 100
            )
        else:
            log.debug("Player Has Not Won a Single Royal Match")
            progression_to_next_div = "0"
        log.debug(
            f"Creating Royal Data String with {rank}, {wins}, {current_div} and {progression_to_next_div}"
        )
        royal_data_string = f"```Rank: {rank}\nWins: {wins}\nCurrent Division: {current_div}\nProgression to Next Division: {progression_to_next_div}%```"
        log.debug(f"Created Royal Data String -> {royal_data_string}")
        return royal_data_string
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C/SystemExit are not swallowed;
        # missing/odd payload shapes land here (KeyError/IndexError/TypeError).
        return (
            "An Error Occured While Getting Royal Data, Player has not played Royal"
        )
def _get_matchmaking_data(self, raw_player_data: dict) -> str:
    """Gets the matchmaking data of the player as a string.

    :param raw_player_data: player payload from the API; 3v3 matchmaking
        stats are expected at raw_player_data["matchmaking"][0].
    :return: a Discord code-block string, or an apology string if the
        player has no matchmaking data.
    """
    log.debug("Getting Matchmaking Data")
    try:
        matchmaking_data = raw_player_data["matchmaking"][0]
        rank = matchmaking_data["info"]["rank"]
        score = matchmaking_data["info"]["score"]
        current_div = int(matchmaking_data["info"]["division"]["position"])
        log.debug("Opening the MM Ranks File")
        # Translate the numeric division position into its display name.
        with open(
            "./bot/resources/json/mm_ranks.json", "r", encoding="UTF-8"
        ) as file:
            mm_ranks = json.load(file)
        current_div = mm_ranks["rank_data"][str(current_div - 1)]
        log.debug("Calculating Progression to Next Division")
        # Percentage of the way through the current division's point range.
        progression_to_next_div = (
            round(
                (score - matchmaking_data["info"]["division"]["minpoints"])
                / (
                    matchmaking_data["info"]["division"]["maxpoints"]
                    - matchmaking_data["info"]["division"]["minpoints"]
                    + 1
                ),
                4,
            )
            * 100
        )
        log.debug(
            f"Creating Matchmaking Data String with {rank}, {score}, {current_div}, {progression_to_next_div}"
        )
        matchmaking_data_string = f"```Rank: {rank}\nScore: {score}\nCurrent Division: {current_div}\nProgression to Next Division: {progression_to_next_div}%```"
        log.debug(f"Created Matchmaking Data String -> {matchmaking_data_string}")
        return matchmaking_data_string
    except Exception:
        # Narrowed from a bare `except:`; missing keys / short lists /
        # unreadable rank file all land here.
        log.error("Player has never Played Matchmaking")
        return "An error Occured While Getting Matchmaking Data, Player has not played Matchmaking"
def _get_trophy_count(self, raw_player_data: dict) -> str:
    """The trophy counts as a string.

    :param raw_player_data: player payload; uses trophies.points (total)
        and trophies.counts (per-tier counts).
    :return: a Discord code-block string listing total points and each tier.
    """
    log.debug("Getting Trophy Counts")
    trophy_count_string = "```\n"
    log.debug("Adding Total Points")
    # Commons.add_commas formats the raw integer with thousands separators.
    total_points = Commons.add_commas(raw_player_data["trophies"]["points"])
    trophy_count_string += f"Total Points: {total_points}\n\n"
    log.debug(f"Added Total Points -> {total_points}")
    # One line per trophy tier, numbered from 1.
    for i, trophy_count in enumerate(raw_player_data["trophies"]["counts"]):
        trophy_count_string = (
            trophy_count_string + f"Trophy {i + 1}: {trophy_count}\n"
        )
    trophy_count_string += "```"
    log.debug(f"Final Trophy Count -> {trophy_count_string}")
    return trophy_count_string
def _get_zones_and_positions(self, raw_player_data) -> typing.Tuple[str, str]:
    """Converts raw_player_data into location and their ranks.

    :param raw_player_data: player payload; zones are nested under
        trophies.zone (zone -> parent -> parent [-> parent]), ranks under
        trophies.zonepositions (parallel order).
    :return: (zones_string, ranks_string) as Discord code-block strings.
        (Annotation fixed: the original declared -> str but returns a
        2-tuple of strings.)
    """
    log.debug("Getting Zones")
    zone_one = raw_player_data["trophies"]["zone"]["name"]
    zone_two = raw_player_data["trophies"]["zone"]["parent"]["name"]
    zone_three = raw_player_data["trophies"]["zone"]["parent"]["parent"]["name"]
    try:
        zone_four = raw_player_data["trophies"]["zone"]["parent"]["parent"][
            "parent"
        ]["name"]
    except Exception:
        # Narrowed from a bare `except:`; not every player has a 4th zone level.
        zone_four = ""
    log.debug(f"Got Zones -> {zone_one}, {zone_two}, {zone_three}, {zone_four}")
    log.debug("Getting Position Data")
    raw_zone_positions = raw_player_data["trophies"]["zonepositions"]
    zone_one_position = raw_zone_positions[0]
    zone_two_position = raw_zone_positions[1]
    zone_three_position = raw_zone_positions[2]
    if zone_four != "":
        zone_four_position = raw_zone_positions[3]
    else:
        zone_four_position = -1  # sentinel; only rendered when zone_four exists
    log.debug("Got Position Data")
    log.debug("Making string for position data")
    ranks_string = "```\n"
    ranks_string += f"{zone_one} - {zone_one_position}\n"
    ranks_string += f"{zone_two} - {zone_two_position}\n"
    ranks_string += f"{zone_three} - {zone_three_position}\n"
    if zone_four != "":
        ranks_string += f"{zone_four} - {zone_four_position}\n"
    ranks_string += "```"
    log.debug(f"Final Ranks String is {ranks_string}")
    log.debug("Creating Zones String")
    zones_string = f"```\n{zone_one}, {zone_two}, {zone_three}"
    if zone_four != "":
        zones_string += f", {zone_four}"
    zones_string += "\n```"
    return zones_string, ranks_string
def _add_meta_details(
    self,
    player_page: discord.Embed,
    raw_player_data: dict,
) -> discord.Embed:
    """Adds the Metadata of a player to the first page of the embed.

    Args:
        player_page (discord.Embed): the first page of player details
        raw_player_data (dict): player data from the api

    Returns:
        discord.Embed: First page of the embed after metadata has been added

    Each social link is optional; a missing key in ``meta`` simply skips
    that field (excepts narrowed from bare ``except:``).
    """
    log.debug("Adding Meta Details for Player")
    meta_data = raw_player_data["meta"]
    try:
        log.debug("Checking if Player has Twitch")
        twitch_name = meta_data["twitch"]
        player_page.add_field(
            name="[<:twitch:895250576751853598>] Twitch",
            value=f"[{twitch_name}](https://twitch.tv/{twitch_name})",
            inline=True,
        )
        log.debug("Twitch Added for Player")
    except Exception:
        log.debug("Player does not have a Twitch Account Linked to TMIO")
    try:
        log.debug("Checking if Player has Twitter")
        twitter_name = meta_data["twitter"]
        player_page.add_field(
            name="[<:twitter:895250587157946388>] Twitter",
            value=f" [{twitter_name}](https://twitter.com/{twitter_name})",
            inline=True,
        )
        log.debug("Twitter Added for Player")
    except Exception:
        log.debug("Player does not have a Twitter Account Linked to TMIO")
    try:
        log.debug("Checking if Player has YouTube")
        youtube_link = meta_data["youtube"]
        player_page.add_field(
            name="[<:youtube:895250572599513138>] YouTube",
            value=f"[YouTube](https://youtube.com/channel/{youtube_link})",
            inline=True,
        )
        log.debug("YouTube Added for Player")
    except Exception:
        log.debug("Player does not have a YouTube Account Linked to TMIO")
    # The TMIO profile link is always added.
    log.debug("Adding TMIO")
    display_name = raw_player_data["displayname"]
    player_id = raw_player_data["accountid"]
    player_page.add_field(
        name="TMIO",
        value=f"[{display_name}](https://trackmania.io/#/player/{player_id})",
    )
    try:
        log.debug("Checking if TMGL Player")
        if meta_data["tmgl"] is True:
            player_page.add_field(
                name="TMGL", value="This Player Participates in TMGL", inline=True
            )
            log.debug("Added TMGL Field")
    except Exception:
        log.debug("Player does not participate in TMGL")
    log.debug("Added TMIO Link")
    log.debug(f"Returning {player_page}")
    return player_page
class TOTDUtils:
| 35.756569 | 195 | 0.578832 |
1fed6ebbcca1ccb5af62d7ab28474d73bafe114f | 4,535 | py | Python | src/vehicle_core/model/throttle_model.py | decabyte/vehicle_core | 623e1e993445713ab2ba625ac54be150077c2f1e | [
"BSD-3-Clause"
] | 1 | 2016-12-14T11:48:02.000Z | 2016-12-14T11:48:02.000Z | src/vehicle_core/model/throttle_model.py | decabyte/vehicle_core | 623e1e993445713ab2ba625ac54be150077c2f1e | [
"BSD-3-Clause"
] | null | null | null | src/vehicle_core/model/throttle_model.py | decabyte/vehicle_core | 623e1e993445713ab2ba625ac54be150077c2f1e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
# Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost
from __future__ import division
import numpy as np
import scipy as sci
import scipy.signal
##pythran export predict_throttle(float[], float[], float[], float, float)
def predict_throttle(throttle_request, b, a, offset, limit):
    """This function returns the predicted throttle for each thruster given a throttle request using a low-pass filter
    IIR filtering. See (http://en.wikipedia.org/wiki/Infinite_impulse_response) for more details.
    The use of scipy is not possible if the pythran optimizer is employed with this module.

    :param throttle_request: matrix of throttle request (N x M) (rows are different thrusters and columns are samples)
    :param b: low-pass filter b coefficients
    :param a: low-pass filter a coefficients
    :param offset: samples offset in the throttle request
    :param limit: throttle value hard limit
    :return: throttle_model is the predicted value of the throttle
    """
    # apply latency delay (offset is positive): drop the last (offset + 1) samples
    throttle_delayed = throttle_request[:, 0:-(offset + 1)]

    # apply the IIR low-pass filter along the sample axis (a dead
    # np.zeros_like pre-allocation that lfilter immediately overwrote was
    # removed; if compiling with pythran, replace this call with a manual
    # difference-equation loop, since scipy is unavailable there)
    throttle_model = sci.signal.lfilter(b, a, throttle_delayed)

    # take the most recent filtered sample per thruster and apply hard limits
    return np.clip(throttle_model[:, -1], -limit, limit)
##pythran export rate_limiter(float[], float[], float, float)
def rate_limiter(new_throttle, last_throttle, rising_limit, falling_limit):
    """Models the change in thruster's throttle.

    http://www.mathworks.co.uk/help/simulink/slref/ratelimiter.html

    :param new_throttle: requested throttle values
    :param last_throttle: result of a previous iteration
    :param rising_limit: maximum allowed increase between two samples
    :param falling_limit: maximum allowed decrease between two samples
    :return: next_throttle: the new throttle after applying rate limits
    """
    diff_throttle = new_throttle - last_throttle
    # Vectorized form of the original per-element loop:
    #   rising faster than allowed  -> last + rising_limit
    #   falling faster than allowed -> last - falling_limit
    #   otherwise                   -> the requested value
    return np.where(
        diff_throttle > rising_limit,
        last_throttle + rising_limit,
        np.where(
            diff_throttle < -falling_limit,
            last_throttle - falling_limit,
            new_throttle,
        ),
    )
| 40.855856 | 118 | 0.714443 |
1fee9ed72e23e0f9892bd14d8b33f1a360d24471 | 1,605 | py | Python | social_friends_finder/backends/vkontakte_backend.py | haremmaster/django-social-friends-finder | cad63349b19b3c301626c24420ace13c63f45ad7 | [
"BSD-3-Clause"
] | 19 | 2015-01-01T16:23:06.000Z | 2020-01-02T22:42:17.000Z | social_friends_finder/backends/vkontakte_backend.py | haremmaster/django-social-friends-finder | cad63349b19b3c301626c24420ace13c63f45ad7 | [
"BSD-3-Clause"
] | 2 | 2015-01-01T16:34:59.000Z | 2015-03-26T10:30:59.000Z | social_friends_finder/backends/vkontakte_backend.py | laplacesdemon/django-social-friends-finder | cad63349b19b3c301626c24420ace13c63f45ad7 | [
"BSD-3-Clause"
] | 11 | 2015-01-16T18:39:34.000Z | 2021-08-13T00:46:41.000Z | from social_friends_finder.backends import BaseFriendsProvider
from social_friends_finder.utils import setting
if not setting("SOCIAL_FRIENDS_USING_ALLAUTH", False):
from social_auth.backends.contrib.vk import VKOAuth2Backend
USING_ALLAUTH = False
else:
from allauth.socialaccount.models import SocialToken, SocialAccount, SocialApp
USING_ALLAUTH = True
import vkontakte
| 33.4375 | 114 | 0.684112 |
1feefa448dd4d27276c85f5a38d04e04d811d4b4 | 56 | py | Python | tests/cms_bundles/__init__.py | ff0000/scarlet | 6c37befd810916a2d7ffff2cdb2dab57bcb6d12e | [
"MIT"
] | 9 | 2015-10-13T04:35:56.000Z | 2017-03-16T19:00:44.000Z | tests/cms_bundles/__init__.py | ff0000/scarlet | 6c37befd810916a2d7ffff2cdb2dab57bcb6d12e | [
"MIT"
] | 32 | 2015-02-10T21:09:18.000Z | 2017-07-18T20:26:51.000Z | tests/cms_bundles/__init__.py | ff0000/scarlet | 6c37befd810916a2d7ffff2cdb2dab57bcb6d12e | [
"MIT"
] | 3 | 2017-07-13T13:32:21.000Z | 2019-04-08T20:18:58.000Z | default_app_config = 'tests.cms_bundles.apps.AppConfig'
| 28 | 55 | 0.839286 |
1ff9642e37e0136fb4ef1901be1925b6d57a71f4 | 2,543 | py | Python | app/test/commonJSONStrings.py | rmetcalf9/dockJob | a61acf7ca52e37ff513695a5cc201d346fb4a7fa | [
"MIT"
] | 14 | 2018-03-28T20:37:56.000Z | 2020-08-30T13:29:05.000Z | app/test/commonJSONStrings.py | rmetcalf9/dockJob | a61acf7ca52e37ff513695a5cc201d346fb4a7fa | [
"MIT"
] | 79 | 2018-02-07T14:42:00.000Z | 2022-02-11T22:30:03.000Z | app/test/commonJSONStrings.py | rmetcalf9/dockJob | a61acf7ca52e37ff513695a5cc201d346fb4a7fa | [
"MIT"
] | 6 | 2018-05-08T21:49:40.000Z | 2021-07-30T13:47:37.000Z |
# Shared request/response fixtures for job API tests.

# Minimal parameters for creating a scheduled job.
data_simpleJobCreateParams = {
    "name": "TestJob",
    "repetitionInterval": "HOURLY:03",
    "command": "ls",
    "enabled": True,
}

# Parameters for a manual (unscheduled, disabled) job.
data_simpleManualJobCreateParams = {
    "name": "TestJob",
    "repetitionInterval": "",
    "command": "ls",
    "enabled": False,
}

# Expected response after creating the simple job; "IGNORE" marks fields the
# test harness does not compare exactly.
data_simpleJobCreateExpRes = {
    "guid": 'IGNORE',
    "name": data_simpleJobCreateParams['name'],
    "command": data_simpleJobCreateParams['command'],
    "enabled": data_simpleJobCreateParams['enabled'],
    "repetitionInterval": data_simpleJobCreateParams['repetitionInterval'],
    "nextScheduledRun": 'IGNORE',
    "creationDate": "IGNORE",
    "lastUpdateDate": "IGNORE",
    "lastRunDate": None,
    "lastRunReturnCode": None,
    "lastRunExecutionGUID": "",
    "mostRecentCompletionStatus": "Unknown",
    "pinned": False,
    "overrideMinutesBeforeMostRecentCompletionStatusBecomesUnknown": None,
    "AfterFailJobGUID": None,
    "AfterFailJobNAME": None,
    "AfterSuccessJobGUID": None,
    "AfterSuccessJobNAME": None,
    "AfterUnknownJobGUID": None,
    "AfterUnknownJobNAME": None,
    "StateChangeSuccessJobGUID": None,
    "StateChangeSuccessJobNAME": None,
    "StateChangeFailJobGUID": None,
    "StateChangeFailJobNAME": None,
    "StateChangeUnknownJobGUID": None,
    "StateChangeUnknownJobNAME": None,
    "objectVersion": 1,
}

# Variant of the create params with every optional field populated.
data_simpleManualJobCreateParamsWithAllOptionalFields = data_simpleJobCreateParams.copy()
data_simpleManualJobCreateParamsWithAllOptionalFields['pinned'] = True
data_simpleManualJobCreateParamsWithAllOptionalFields['overrideMinutesBeforeMostRecentCompletionStatusBecomesUnknown'] = 357
# Empty GUIDs: no other jobs exist yet, so only the default value can be sent.
data_simpleManualJobCreateParamsWithAllOptionalFields['StateChangeSuccessJobGUID'] = ''
data_simpleManualJobCreateParamsWithAllOptionalFields['StateChangeFailJobGUID'] = ''
data_simpleManualJobCreateParamsWithAllOptionalFields['StateChangeUnknownJobGUID'] = ''

# Matching expected response for the all-optional-fields request.
data_simpleManualJobCreateParamsWithAllOptionalFieldsExpRes = data_simpleJobCreateExpRes.copy()
data_simpleManualJobCreateParamsWithAllOptionalFieldsExpRes['pinned'] = True
data_simpleManualJobCreateParamsWithAllOptionalFieldsExpRes['overrideMinutesBeforeMostRecentCompletionStatusBecomesUnknown'] = 357

# Expected response after creating a job execution.
data_simpleJobExecutionCreateExpRes = {
    "guid": 'IGNORE',
    "stage": 'Pending',
    "executionName": 'TestExecutionName',
    "resultReturnCode": 0,
    "jobGUID": 'OVERRIDE',
    "jobName": 'TestJob',
    "jobCommand": 'OVERRIDE',
    "resultSTDOUT": '',
    "manual": True,
    "dateCreated": 'IGNORE',
    "dateStarted": 'IGNORE',
    "dateCompleted": 'IGNORE',
}
| 35.319444 | 152 | 0.773889 |
1ff9b69a4019a1762d86b4de69764598a30ea2b6 | 8,228 | py | Python | dial/metrics.py | neukg/KAT-TSLF | 91bff10312ba5fbbd46978b268a1c97a5d627dcd | [
"MIT"
] | 11 | 2021-11-19T06:17:10.000Z | 2022-03-11T07:12:30.000Z | dial/metrics.py | neukg/KAT-TSLF | 91bff10312ba5fbbd46978b268a1c97a5d627dcd | [
"MIT"
] | 3 | 2021-11-20T14:00:24.000Z | 2022-03-03T19:41:01.000Z | dial/metrics.py | neukg/KAT-TSLF | 91bff10312ba5fbbd46978b268a1c97a5d627dcd | [
"MIT"
] | null | null | null | from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
from nltk import word_tokenize
# import language_evaluation
from typing import List
from collections import defaultdict, Counter
import re
import math
import sys
def _calc_cover_rate(cands, golds, ngram):
    """Corpus-level n-gram cover rate.

    Sums the per-pair (covered, total) counts produced by ``_calc_cover``
    over aligned candidate/gold token sequences and returns the ratio
    covered / total.  ``_calc_cover`` is defined elsewhere in this module.
    """
    cover = 0.0
    # Start the denominator at a tiny epsilon so an empty corpus cannot
    # divide by zero.
    total = 0.000001
    for cand_tokens, gold_tokens in zip(cands, golds):
        cur_cover, cur_total = _calc_cover(cand_tokens, gold_tokens, ngram)
        cover += cur_cover
        total += cur_total
    return cover / total
# def calc_corpus_bleu_new(cands, golds):
# golds = [[gold] for gold in golds]
# sf = SmoothingFunction().method7
# bleu1 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[1, 0, 0, 0])
# bleu2 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[0.5, 0.5, 0, 0])
# bleu3 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[0.34, 0.33, 0.33, 0])
# return bleu1, bleu2, bleu3
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    SQuAD/ParlAI-style answer normalization: the input is lower-cased,
    punctuation and the English articles a/an/the are replaced by spaces,
    runs of whitespace are collapsed, and the result is returned as a list
    of tokens (the final ``split(' ')``).
    """
    re_art = re.compile(r'\b(a|an|the)\b')
    re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')

    # The helpers below were missing from this block (the return line raised
    # NameError); they are restored from the standard SQuAD/ParlAI
    # normalization implementation.
    def remove_articles(text):
        # \b anchors keep word boundaries intact, so 'a'/'an'/'the' are
        # only dropped as whole words.
        return re_art.sub(' ', text)

    def white_space_fix(text):
        # Collapse any run of whitespace into a single space.
        return ' '.join(text.split())

    def remove_punc(text):
        # Replace punctuation with spaces (not empty strings) so tokens
        # joined by punctuation do not fuse together.
        return re_punc.sub(' ', text)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s)))).split(' ')
if __name__ == "__main__":
cand_file = sys.argv[1]
gold_file = sys.argv[2]
file_dialogue_evaluation(cand_file, gold_file)
| 35.465517 | 113 | 0.653986 |
1ffa89e42119c66f0b38cae0145de37c497cd8de | 896 | py | Python | 06_packet_sniffer/packet_sniffer.py | maks-nurgazy/ethical-hacking | 0f9f2b943b5afa9b11251270e4672e0965ec1769 | [
"MIT"
] | null | null | null | 06_packet_sniffer/packet_sniffer.py | maks-nurgazy/ethical-hacking | 0f9f2b943b5afa9b11251270e4672e0965ec1769 | [
"MIT"
] | null | null | null | 06_packet_sniffer/packet_sniffer.py | maks-nurgazy/ethical-hacking | 0f9f2b943b5afa9b11251270e4672e0965ec1769 | [
"MIT"
] | null | null | null | import scapy.all as scapy
from scapy.layers import http
sniff("eth0")
| 27.151515 | 90 | 0.631696 |
1ffb6e885c207ea205ef242e09f2cabe5866ad26 | 3,705 | py | Python | cameraToWorld.py | blguweb/Tap-Tap-computer | 4e2007b5a31e6d5f902b1e3ca58206870331ef07 | [
"MIT"
] | null | null | null | cameraToWorld.py | blguweb/Tap-Tap-computer | 4e2007b5a31e6d5f902b1e3ca58206870331ef07 | [
"MIT"
] | null | null | null | cameraToWorld.py | blguweb/Tap-Tap-computer | 4e2007b5a31e6d5f902b1e3ca58206870331ef07 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import NoReturn
import cv2 as cv
import numpy as np
from numpy import mat
import xml.etree.ElementTree as ET
import math
camera_angle = 315
camera_intrinsic = {
# #
# matlab
"camera_matrix": [871.086328150675740,0.0, 314.319098669115306,
0.0, 868.410697770935144, 254.110678266434348,
0.0, 0.0, 1.0],
#
"camera_distortion": [0.182040359674805,-0.564946010535902,0.001566542339394, 0.003396709692351,0.000000000000000 ],
# # #
"camera_rvec": [-1.57079633, 0.0, 0.0],
#
# "camera_tvec": ['-29.046143504451425', '1126.526303382564', '736.155158603123']
"camera_tvec": [0.0, 0.0, 0.0],
# #
# "rvec_matrix": [[1.0,0.0,0.0],
# [0.0,0.0,-1.0],
# [0.0,1.0,0.0]]
}
if __name__ == '__main__':
    # Demo driver: project one pixel into world coordinates and test the
    # result against a hard-coded target region.  The CtoWorld converter
    # class is defined earlier in this file.
    mctoworld = CtoWorld()  # camera-to-world coordinate converter
    # Pixel coordinate plus its depth reading.  The original comment reads
    # "x,y,depth"; the third list element looks like a homogeneous 1 --
    # TODO confirm against CtoWorld.pixel_c.
    points = [355, 218, 1]
    depth = 1540
    # Pixel -> camera frame -> world frame.
    camera_points = mctoworld.pixel_c(points, depth)
    w_points = mctoworld.c_w(camera_points)
    # IMU reading as a whitespace-separated string; presumably three Euler
    # angles -- verify against CtoWorld.imu_get.
    mes = "-42.60 6.91 0.67"
    x, y, z = mctoworld.imu_get(mes)
    mvector = mctoworld.unit_vector_get(x, y, z)
    # Intersect the pointing vector with the target plane, yielding the
    # target's x/z world coordinates.
    tx, tz = mctoworld.target_not(w_points, mvector)
    print("tx: ", tx)
    print("tz: ", tz)
    # Accept the result only when it falls inside the expected region.
    if -2000 < tx < -1380 and 840 < tz < 1300:
        print("true")
    else:
        print("false")
1ffb6f2d2eca765ba18ee0ccc397d70767e06533 | 5,004 | py | Python | compilers/labs/lab2/gui.py | vampy/university | 9496cb63594dcf1cc2cec8650b8eee603f85fdab | [
"MIT"
] | 6 | 2015-06-22T19:43:13.000Z | 2019-07-15T18:08:41.000Z | compilers/labs/lab2/gui.py | vampy/university | 9496cb63594dcf1cc2cec8650b8eee603f85fdab | [
"MIT"
] | null | null | null | compilers/labs/lab2/gui.py | vampy/university | 9496cb63594dcf1cc2cec8650b8eee603f85fdab | [
"MIT"
] | 1 | 2015-09-26T09:01:54.000Z | 2015-09-26T09:01:54.000Z | #!/usr/bin/python
import os
from log import Log
from enum import IntEnum, unique
from grammar import Grammar
from automaton import FiniteAutomaton
| 33.139073 | 108 | 0.552158 |
1ffbe3042328109603927698807569c875283801 | 180 | py | Python | atividades/ex31.py | Fleen66/Python_exercises | fd05fdf1181da833a1a1bc9f4a476afc8f467977 | [
"MIT"
] | null | null | null | atividades/ex31.py | Fleen66/Python_exercises | fd05fdf1181da833a1a1bc9f4a476afc8f467977 | [
"MIT"
] | null | null | null | atividades/ex31.py | Fleen66/Python_exercises | fd05fdf1181da833a1a1bc9f4a476afc8f467977 | [
"MIT"
] | null | null | null | distancia = int(input('Digite a distancia de sua viagem: '))
if distancia <= 200:
preco = distancia * 0.50
print(preco)
else:
preco = distancia * 0.40
print(preco)
| 22.5 | 60 | 0.644444 |
1ffc42584a05c85ceb4b5e649094a2917f366627 | 7,947 | py | Python | src/triangle.py | songrun/VectorSkinning | a19dff78215b51d824adcd39c7dcdf8dc78ec617 | [
"Apache-2.0"
] | 18 | 2015-04-29T20:54:15.000Z | 2021-12-13T17:48:05.000Z | src/triangle.py | songrun/VectorSkinning | a19dff78215b51d824adcd39c7dcdf8dc78ec617 | [
"Apache-2.0"
] | null | null | null | src/triangle.py | songrun/VectorSkinning | a19dff78215b51d824adcd39c7dcdf8dc78ec617 | [
"Apache-2.0"
] | 8 | 2017-04-23T17:52:13.000Z | 2022-03-14T11:01:56.000Z | import sys
import subprocess
import os
from numpy import asarray
#triangle_path = os.path.join( "C:\\Users\\Mai\\Dropbox\\Research\\Deformation\\src\\py\\triangle", "triangle.exe")
triangle_path = os.path.join( os.path.dirname( __file__ ), "triangle", "triangle" )
if not os.path.exists( triangle_path ):
raise ImportError, "Triangle not found: " + triangle_path
def triangles_for_points( points, boundary_edges = None ):
    '''
    Given a sequence of 2D points 'points' and
    optional sequence of 2-tuples of indices into 'points' 'boundary_edges',
    returns a triangulation of the points as a sequence
    of length-three tuples ( i, j, k ) where i,j,k are
    the indices of the triangle's vertices in 'points'.
    If 'boundary_edges' is not specified or is an empty sequence,
    a convex triangulation will be returned.
    Otherwise, 'boundary_edges' indicates the boundaries of the desired mesh.

    Returns ( points, triangles ): the point list is re-read from
    triangle's output, which may differ from the input points.
    '''
    import os, subprocess
    ### http://www.cs.cmu.edu/~quake/triangle.switch.html
    ## -q Quality mesh generation with no angles smaller than 20 degrees. An alternate minimum angle may be specified after the `q'.
    ## -a Imposes a maximum triangle area constraint. A fixed area constraint (that applies to every triangle) may be specified after the `a', or varying area constraints may be read from a .poly file or .area file.
    ## -g Outputs the mesh to an Object File Format (.off) file, suitable for viewing with the Geometry Center's Geomview package.
    options = [ '-q', '-a100', '-g' ]
    # options = [ '-q' ]
    if boundary_edges is None: boundary_edges = []
    if len( boundary_edges ) == 0:
        ## No boundary: write a bare .node file and let triangle produce a
        ## convex triangulation.  (Python 2 print statement below.)
        input_path = write_node_file( points )
        print triangle_path, input_path
        subprocess.call( [ triangle_path ] + options + [ input_path ] )
    else:
        ## With boundary segments: write a .poly file and pass -p so
        ## triangle treats the input as a Planar Straight Line Graph.
        input_path = write_poly_file( points, boundary_edges )
        ## -p Triangulates a Planar Straight Line Graph (.poly file).
        subprocess.call( [ triangle_path ] + options + [ '-p', input_path ] )
    ## triangle writes its results next to the input as <name>.1.ele/.1.node.
    ele_path = os.path.splitext( input_path )[0] + '.1.ele'
    triangles = read_ele_file( ele_path )
    node_path = os.path.splitext( input_path )[0] + '.1.node'
    points = read_node_file( node_path)
    ## NOTE(review): the temporary input/output files are never deleted (the
    ## clean-up below is commented out), so repeated calls accumulate files
    ## in the temp directory.
    #os.remove( poly_path )
    #os.remove( ele_path )
    return points, triangles
def __write_node_portion_of_file_to_object( obj, points, boundary_indices = set() ):
    '''
    Given an object 'obj' that can be passed as a parameter to
        print >> 'obj', "Something to print",
    a sequence of 2D points 'points', and
    an optional set of indices in 'points' that are to be considered 'boundary_indices',
    writes the '.node' portion of the file suitable for passing to 'triangle'
    ( http://www.cs.cmu.edu/~quake/triangle.node.html ).
    Does not return a value.

    NOTE: the mutable default for 'boundary_indices' is safe here because
    the set is only read, never mutated.
    '''
    ## 'points' must be a non-empty sequence of x,y positions.
    points = asarray( points )
    assert points.shape == ( len( points ), 2 )
    assert points.shape[0] > 0
    ## The elements in 'boundary_indices' must be a subset of indices into 'points'.
    ## NOTE: set.issuperset() returns True if the sets are the same.
    assert set(range(len(points))).issuperset( boundary_indices )
    print >> obj, '## The vertices'
    ## Header: <# of vertices> <dimension> <# of attributes> <boundary markers>.
    ## NOTE(review): triangle's .node header documents the last field as a
    ## 0/1 flag; the count of boundary vertices is written here -- confirm
    ## against the triangle file-format docs.
    print >> obj, len( points ), 2, 0, len( boundary_indices )
    for i, ( x, y ) in enumerate( points ):
        ## Per vertex: index, x, y, and a 0/1 boundary marker.
        print >> obj, i, x, y, ( 1 if i in boundary_indices else 0 )
def write_poly_file( points, boundary_edges ):
    '''
    Given a sequence of 2D points 'points'
    and a potentially empty sequence 'boundary_edges' of
    2-tuples of indices into 'points',
    writes a '.poly' file suitable for passing to 'triangle'
    ( http://www.cs.cmu.edu/~quake/triangle.poly.html )
    and returns the path to the '.poly' file.

    The caller is responsible for deleting the temporary file.
    '''
    ## Each of the two elements of each 2-tuple in 'boundary_edges'
    ## must be indices into 'points'.
    assert all([ i >= 0 and i < len( points ) and j >= 0 and j < len( points ) and i != j for i,j in boundary_edges ])
    ## They must be unique and undirected.
    ## (frozenset collapses (i,j) and (j,i) to the same edge.)
    assert len( boundary_edges ) == len( set([ frozenset( edge ) for edge in boundary_edges ]) )
    ## Create 'boundary_indices', the set of all indices that appear
    ## in 'boundary_edges'.
    boundary_indices = frozenset( asarray( boundary_edges ).ravel() )
    import tempfile
    ## This only works on Python 2.6+
    #poly_file = tempfile.NamedTemporaryFile( suffix = '.poly', delete = False )
    #poly_file_name = poly_file.name
    ## mkstemp() returns an OS-level descriptor; wrap it in a file object.
    poly_file, poly_file_name = tempfile.mkstemp( suffix = '.poly' )
    poly_file = os.fdopen( poly_file, 'w' )
    ## Python 2 'print >> file' syntax throughout.
    print >> poly_file, '## Written by triangle.py'
    ## The vertex section is shared with plain .node files.
    __write_node_portion_of_file_to_object( poly_file, points, boundary_indices )
    print >> poly_file, ''
    print >> poly_file, '## The segments'
    print >> poly_file, len( boundary_edges ), len( boundary_edges )
    for i, ( e0, e1 ) in enumerate( boundary_edges ):
        ## Per segment: index, the two endpoint indices, boundary marker 1.
        print >> poly_file, i, e0, e1, 1
    print >> poly_file, ''
    print >> poly_file, '## The holes'
    ## No holes are written.
    print >> poly_file, 0
    poly_file.close()
    return poly_file_name
def write_node_file( points ):
    '''
    Given a sequence of 2D points 'points',
    writes a '.node' file suitable for passing to 'triangle'
    ( http://www.cs.cmu.edu/~quake/triangle.node.html )
    and returns the path to the '.node' file.

    The caller is responsible for deleting the temporary file.
    '''
    import tempfile
    ## This only works on Python 2.6+
    #node_file = tempfile.NamedTemporaryFile( suffix = '.node', delete = False )
    #node_file_name = node_file.name
    ## mkstemp() returns an OS-level descriptor; wrap it in a file object.
    node_file, node_file_name = tempfile.mkstemp( suffix = '.node' )
    node_file = os.fdopen( node_file, 'w' )
    print >> node_file, '## Written by triangle.py'
    __write_node_portion_of_file_to_object( node_file, points )
    node_file.close()
    return node_file_name
def read_ele_file( ele_path ):
    '''
    Reads a '.ele' file generated by 'triangle'.
    Returns the list of triangles as length-3 tuples of integer indices
    into the corresponding '.node' file.
    '''
    triangles = []
    ## 'with' guarantees the handle is closed even if parsing raises; the
    ## previous version leaked the file object on an exception.
    with open( ele_path ) as ele_file:
        ## Ignore top line (the header with the element counts).
        ele_file.readline()
        for line in ele_file:
            sline = line.strip().split()
            if len( sline ) == 0: continue      ## skip blank lines
            if sline[0][0] == '#': continue     ## skip comment lines
            ## Column 0 is the triangle index; columns 1-3 are its vertices.
            triangles.append( tuple([ int( index ) for index in sline[1:4] ]) )
            assert len( triangles[-1] ) == 3
    return triangles
def read_node_file( node_path ):
    '''
    Reads a '.node' file generated by 'triangle'.
    Returns the list of points as tuples of floats (up to three columns
    after the per-line point index).
    '''
    points = []
    ## 'with' guarantees the handle is closed even if parsing raises; the
    ## previous version leaked the file object on an exception.  The local
    ## was also renamed from the misleading 'triangles' to 'points'.
    with open( node_path ) as node_file:
        ## Ignore top line (the header with the vertex counts).
        node_file.readline()
        for line in node_file:
            sline = line.strip().split()
            if len( sline ) == 0: continue      ## skip blank lines
            if sline[0][0] == '#': continue     ## skip comment lines
            ## Column 0 is the point index; parse up to three further
            ## columns (x, y, and an optional marker) as floats.
            points.append( tuple([ float( value ) for value in sline[1:4] ]) )
    return points
# def main():
# pts = [ ( -1,-1 ), ( 1, -1 ), ( 1, 1 ), ( -1, 1 ), ( 0, 0 ) ]
# edges = [ ( 0, 1 ), ( 1, 2 ), ( 2, 3 ), ( 3, 0 ) ]
#
# ## This isn't very good, because 4 random points may be self-intersecting
# ## when viewed as a polyline loop.
# #import random
# #pts = [ ( random.uniform( -1, 1 ), random.uniform( -1, 1 ) ) for i in xrange(4) ]
#
# print 'pts:', pts
#
# points, triangles = triangles_for_points( pts )
# print 'points (no boundary edges):', points
# print 'triangles (no boundary edges):', triangles
#
# print 'width edges:', edges
# points, triangles = triangles_for_points( pts, edges )
# print 'points (with edges):', points
# print 'triangles (with edges):', triangles
#
# if __name__ == '__main__': main()
| 36.287671 | 215 | 0.630804 |
1ffe75b4736bb2daa16ad12967f532235a2b0677 | 4,559 | py | Python | edbdeploy/spec/baremetal.py | vincentp7212/postgres-deployment | ea0ed0e06a4eb99cc28600398eddcf2320778113 | [
"BSD-3-Clause"
] | 58 | 2020-02-24T21:02:50.000Z | 2022-03-28T14:51:56.000Z | edbdeploy/spec/baremetal.py | vincentp7212/postgres-deployment | ea0ed0e06a4eb99cc28600398eddcf2320778113 | [
"BSD-3-Clause"
] | 108 | 2020-09-18T12:53:44.000Z | 2022-02-02T09:02:31.000Z | edbdeploy/spec/baremetal.py | vincentp7212/postgres-deployment | ea0ed0e06a4eb99cc28600398eddcf2320778113 | [
"BSD-3-Clause"
] | 47 | 2020-03-04T15:51:01.000Z | 2022-02-27T13:48:05.000Z | from . import SpecValidator
BaremetalSpec = {
'EDB-RA-1': {
'ssh_user': SpecValidator(type='string', default=None),
'pg_data': SpecValidator(type='string', default=None),
'pg_wal': SpecValidator(type='string', default=None),
'postgres_server_1': {
'name': SpecValidator(type='string', default='pg1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pem_server_1': {
'name': SpecValidator(type='string', default='pem1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'backup_server_1': {
'name': SpecValidator(type='string', default='backup1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
}
},
'EDB-RA-2': {
'ssh_user': SpecValidator(type='string', default=None),
'pg_data': SpecValidator(type='string', default=None),
'pg_wal': SpecValidator(type='string', default=None),
'postgres_server_1': {
'name': SpecValidator(type='string', default='pg1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_2': {
'name': SpecValidator(type='string', default='pg2'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_3': {
'name': SpecValidator(type='string', default='pg3'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pem_server_1': {
'name': SpecValidator(type='string', default='pem1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'backup_server_1': {
'name': SpecValidator(type='string', default='backup1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
}
},
'EDB-RA-3': {
'ssh_user': SpecValidator(type='string', default=None),
'pg_data': SpecValidator(type='string', default=None),
'pg_wal': SpecValidator(type='string', default=None),
'postgres_server_1': {
'name': SpecValidator(type='string', default='pg1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_2': {
'name': SpecValidator(type='string', default='pg2'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_3': {
'name': SpecValidator(type='string', default='pg3'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pooler_server_1': {
'name': SpecValidator(type='string', default='pooler1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pooler_server_2': {
'name': SpecValidator(type='string', default='pooler2'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pooler_server_3': {
'name': SpecValidator(type='string', default='pooler3'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pem_server_1': {
'name': SpecValidator(type='string', default='pem1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'backup_server_1': {
'name': SpecValidator(type='string', default='backup1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
}
}
}
| 45.59 | 68 | 0.573591 |
1ffec07dcf5a4c57c0d689934f15fff735336375 | 2,382 | py | Python | ml-scripts/ss_calib/scripts/ss_charge_cali.py | YashengFu/exo-200_scripts | d33a1a2eeda5f072409656b96e8730f2de53ee0b | [
"MIT"
] | null | null | null | ml-scripts/ss_calib/scripts/ss_charge_cali.py | YashengFu/exo-200_scripts | d33a1a2eeda5f072409656b96e8730f2de53ee0b | [
"MIT"
] | null | null | null | ml-scripts/ss_calib/scripts/ss_charge_cali.py | YashengFu/exo-200_scripts | d33a1a2eeda5f072409656b96e8730f2de53ee0b | [
"MIT"
] | null | null | null | import numpy as np
import time
import argparse
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from scipy import special
from tqdm import tqdm
from scipy.optimize import curve_fit
from utils.build_hist import build_hist
if __name__ == "__main__":
start_time = time.time()
test_object = SS_Charge("/dybfs2/nEXO/fuys/EXO-200/shape_agreement/2019_0vbb/Phase1/fv_162_10_182_173_3d0.6/data/ml_rec_data/",["run_6255_ml.h5"])
test_object.select_ss_data(1)
bin_centers, hist_data = test_object.check_data()
bin_centers, hist_data, bin_centers_mask, c_energy_mask, popt, perr = test_object.fit_data()
print(f"time costs: {(time.time() -start_time)/60} min")
| 38.419355 | 184 | 0.673804 |
1fff4ed247e76eafdf9461ae3d7ab7dc88f2b73c | 97,747 | py | Python | ExoplanetPocketknife.py | ScottHull/Exoplanet-Pocketknife | 15b49ff3612adc3b31a78c27379fb8b2f47c6c8f | [
"CC0-1.0"
] | null | null | null | ExoplanetPocketknife.py | ScottHull/Exoplanet-Pocketknife | 15b49ff3612adc3b31a78c27379fb8b2f47c6c8f | [
"CC0-1.0"
] | null | null | null | ExoplanetPocketknife.py | ScottHull/Exoplanet-Pocketknife | 15b49ff3612adc3b31a78c27379fb8b2f47c6c8f | [
"CC0-1.0"
] | null | null | null | # python /usr/bin/env/python
# /// The Exoplanet Pocketknife
# /// Scott D. Hull, The Ohio State University 2015-2017
# /// All usage must include proper citation and a link to the Github repository
# /// https://github.com/ScottHull/Exoplanet-Pocketknife
import os, csv, time, sys, shutil, subprocess
from threading import Timer
from math import *
import pandas as pd
import matplotlib.pyplot as plt
from scipy import integrate as inte
import numpy as np
import bisect
bsp_run = False
morb_run = False
gravity = 9.8
# plate_thickness = 10.0 # This is in km!
plate_thickness = 10 * 1000 # This is in m!
na_atwt = 22.98976928
mg_atwt = 24.305
al_atwt = 26.9815386
si_atwt = 28.0855
ca_atwt = 40.078
ti_atwt = 47.867
cr_atwt = 51.9961
fe_atwt = 55.845
ni_atwt = 58.6934
na2o_molwt = 61.9785
mgo_molwt = 40.3040
al2o3_molwt = 101.9601
sio2_molwt = 60.0835
cao_molwt = 56.0770
tio2_molwt = 79.8650
cr2o3_molwt = 151.9892
feo_molwt = 71.8440
nio_molwt = 74.6924
fe2o3_molwt = 159.687
num_na2o_cations = 2
num_mgo_cations = 1
num_al2o3_cations = 2
num_sio2_cations = 1
num_cao_cations = 1
num_tio2_cations = 1
num_cr2o3_cations = 2
num_feo_cations = 1
num_nio_cations = 1
num_fe2o3_cations = 2
asplund_na = 1479108.388
asplund_mg = 33884415.61
asplund_al = 2344228.815
asplund_si = 32359365.69
asplund_ca = 2041737.945
asplund_ti = 79432.82347
asplund_cr = 436515.8322
asplund_fe = 28183829.31
asplund_ni = 1698243.652
asplund_sivsfe = asplund_si / asplund_fe
asplund_navsfe = asplund_na / asplund_fe
mcd_earth_fe = 29.6738223341739
mcd_earth_na = 0.40545783900173
mcd_earth_mg = 32.812015232308
mcd_earth_al = 3.05167459380979
mcd_earth_si = 29.6859892035662
mcd_earth_ca = 2.20951970229211
mcd_earth_ni = 1.60579436264263
mcd_earth_ti = 0.0876307681103416
mcd_earth_cr = 0.468095964095391
mc_earth_ni = 1.60579436264263
mcd_sivsfe = mcd_earth_si / mcd_earth_fe
mcd_navsfe = mcd_earth_na / mcd_earth_fe
adjust_si = mcd_sivsfe / asplund_sivsfe
adjust_na = mcd_navsfe / asplund_navsfe
modelearth_mgo = 11.84409812845
gale_mgo = 7.65154964069009
mgo_fix = gale_mgo / modelearth_mgo
depth_trans_zone = [0, 6, 19.7, 28.9, 36.4, 43.88, 51.34, 58.81, 66.36, 73.94, 81.5, 88.97, 96.45, 103.93, 111.41,
118.92, 126.47, 134.01, 141.55, 149.09, 156.64, 164.18, 171.72, 179.27, 186.79, 194.27, 201.75,
209.23, 216.71, 224.09, 231.4, 238.7, 246.01, 253.31, 260.62, 267.9, 275.16, 282.42, 289.68,
296.94, 304.19, 311.41, 318.44, 325.47, 332.5, 339.53, 346.56, 353.59, 360.62, 367.66, 374.69,
381.72, 388.75, 395.78, 402.78, 409.72, 416.67, 423.61, 430.56, 437.5, 444.44, 451.32, 457.89,
464.47, 471.05, 477.63, 484.21, 490.79, 497.37, 503.75, 510, 516.25, 522.5, 528.75, 535, 541.25,
547.5, 553.95, 560.53, 567.11, 573.68]
inputfile_list = []
home_dir = []
# star_names = []
# na_h = []
# mg_h = []
# al_h = []
# si_h = []
# ca_h = []
# ti_h = []
# cr_h = []
# fe_h = []
#
# star_index = []
# na_index = []
# mg_index = []
# al_index = []
# si_index = []
# ca_index = []
# ti_index = []
# cr_index = []
# fe_index = []
#
# na_mol_abundances = []
# mg_mol_abundances = []
# al_mol_abundances = []
# si_mol_abundances = []
# ca_mol_abundances = []
# ti_mol_abundances = []
# cr_mol_abundances = []
# fe_mol_abundances = []
initialization()
| 46.7689 | 207 | 0.541234 |
9500f8ddc8a192d5b326bf23ad973aa2e9a8109b | 4,074 | py | Python | tools/extract_observable.py | pauxy-qmc/pauxy | 1da80284284769b59361c73cfa3c2d914c74a73f | [
"Apache-2.0"
] | 16 | 2020-08-05T17:17:17.000Z | 2022-03-18T04:06:18.000Z | tools/extract_observable.py | pauxy-qmc/pauxy | 1da80284284769b59361c73cfa3c2d914c74a73f | [
"Apache-2.0"
] | 4 | 2020-05-17T21:28:20.000Z | 2021-04-22T18:05:50.000Z | tools/extract_observable.py | pauxy-qmc/pauxy | 1da80284284769b59361c73cfa3c2d914c74a73f | [
"Apache-2.0"
] | 5 | 2020-05-18T01:03:18.000Z | 2021-04-13T15:36:29.000Z | #!/usr/bin/env python
'''Exctact element of green's function'''
import argparse
import sys
import numpy
import os
import pandas as pd
import json
_script_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(_script_dir, 'analysis'))
import matplotlib.pyplot as plt
# from pauxy.analysis.extraction import analysed_itcf
# from pauxy.analysis.extraction import analysed_energies, extract_hdf5_simple
from pauxy.analysis.extraction import (
extract_mixed_estimates,
get_metadata
)
import matplotlib.pyplot as pl
def parse_args(args):
    """Parse command-line arguments.

    Parameters
    ----------
    args : list of strings
        command-line arguments.

    Returns
    -------
    options : :class:`argparse.ArgumentParser`
        Command line arguments.
    """
    def _comma_separated_ints(raw):
        # '-e 1,2,3' -> [1, 2, 3]
        return [int(piece) for piece in raw.split(',')]

    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument('-s', '--spin', type=str, dest='spin', default=None,
                     help='Spin component to extract.Options: up/down')
    cli.add_argument('-t', '--type', type=str, dest='type', default=None,
                     help='Type of green\'s function to extract.Options: lesser/greater')
    cli.add_argument('-k', '--kspace', dest='kspace', action='store_true',
                     default=False, help='Extract kspace green\'s function.')
    cli.add_argument('-e', '--elements', type=_comma_separated_ints,
                     dest='elements', default=None, help='Element to extract.')
    cli.add_argument('-o', '--observable', type=str, dest='obs',
                     default='None', help='Data to extract')
    cli.add_argument('-p', '--plot-energy', action='store_true', dest='plot',
                     default=False, help='Plot energy trace.')
    cli.add_argument('-f', nargs='+', dest='filename',
                     help='Space-separated list of files to analyse.')

    options = cli.parse_args(args)
    # At least one input file is mandatory; print usage and bail otherwise.
    if not options.filename:
        cli.print_help()
        sys.exit(1)
    return options
def main(args):
    """Extract observable from analysed output.

    Parameters
    ----------
    args : list of strings
        command-line arguments.

    Returns
    -------
    results : :class:`pandas.DataFrame`
        Analysed results.
    """
    options = parse_args(args)
    # Whether the printed table should keep its index column; only set by
    # the correlation-function branch below.
    print_index = False
    if options.obs == 'itcf':
        # NOTE(review): analysed_itcf / analysed_energies /
        # correlation_function are not imported -- the matching imports at
        # the top of this file are commented out, so these branches raise
        # NameError as written.  Confirm which extraction module supplies
        # them.
        results = analysed_itcf(options.filename[0], options.elements,
                                options.spin, options.type, options.kspace)
    elif options.obs == 'energy':
        results = analysed_energies(options.filename[0], 'mixed')
    elif options.obs == 'back_propagated':
        results = analysed_energies(options.filename[0], 'back_propagated')
    elif 'correlation' in options.obs:
        # e.g. '-o spin_correlation' -> ctype 'spin'.
        ctype = options.obs.replace('_correlation', '')
        results = correlation_function(options.filename[0],
                                       ctype,
                                       options.elements)
        print_index = True
    elif options.plot:
        # Plot the mixed-estimator energy trace instead of tabulating.
        data = extract_mixed_estimates(options.filename[0])
        md = get_metadata(options.filename[0])
        fp = md['propagators']['free_projection']
        dt = md['qmc']['dt']
        mc = md['qmc']['nsteps']
        # Drop zero-weight rows before building the imaginary-time axis.
        data = data[abs(data.Weight) > 0.0]
        tau = numpy.arange(0,len(data)) * mc * dt
        if fp:
            # Free projection: plot the energy estimate ENumer/EDenom.
            pl.plot(tau, numpy.real(data.ENumer/data.EDenom))
            pl.xlabel(r"$\tau$ (au)")
            pl.ylabel(r"Energy (au)")
            pl.show()
        else:
            pl.plot(tau, data[options.obs].real)
            pl.xlabel(r"$\tau$ (au)")
            pl.ylabel(r"{} (au)".format(options.obs))
            pl.show()
    else:
        print ('Unknown observable')
    if not options.plot:
        # NOTE(review): if options.obs matched no branch above, 'results'
        # is unbound here and this raises NameError; 'print_index' is also
        # computed but never used.
        print (results.to_string())
        results.to_csv("%s"%options.obs)
if __name__ == '__main__':
main(sys.argv[1:])
| 33.393443 | 82 | 0.579774 |
950130b7d174e4ab134e14783a96e2c70ef6e914 | 12,854 | py | Python | datasets.py | shivakanthsujit/FMMRNet | 12742398e3b981938a69e44b3f37d285904929b4 | [
"MIT"
] | null | null | null | datasets.py | shivakanthsujit/FMMRNet | 12742398e3b981938a69e44b3f37d285904929b4 | [
"MIT"
] | null | null | null | datasets.py | shivakanthsujit/FMMRNet | 12742398e3b981938a69e44b3f37d285904929b4 | [
"MIT"
] | null | null | null | import glob
import os
import albumentations as A
import kaggle
import numpy as np
import PIL
import pytorch_lightning as pl
import torch
from albumentations.pytorch import ToTensorV2
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from utils import show_images
train_transform = get_train_transforms()
valid_transform = get_valid_transforms()
BATCH_SIZE = 4
SEED = 42
NUM_WORKERS = 4
kaggle.api.authenticate()
def get_train_valid_loader(
    train_data,
    valid_data,
    batch_size=4,
    valid_size=0.1,
    show_sample=False,
    num_workers=NUM_WORKERS,
    pin_memory=False,
    shuffle=True,
    seed=SEED,
):
    """Build train/validation DataLoaders from index-aligned datasets.

    A single (optionally shuffled) index split is applied to both
    ``train_data`` and ``valid_data`` -- typically the same underlying data
    under different transforms -- so the two loaders never share samples.

    Parameters
    ----------
    train_data, valid_data : torch Dataset
        Index-aligned datasets to split.
    batch_size : int
        Batch size used by both loaders.
    valid_size : float
        Fraction of samples held out for validation; must lie in [0, 1].
    show_sample : bool
        When True, fetch one batch from each loader and display it with
        ``show_images``.
    num_workers, pin_memory :
        Passed straight through to ``DataLoader``.
    shuffle : bool
        Shuffle the indices (seeded with ``seed``) before splitting.
    seed : int
        Seed for the numpy RNG when ``shuffle`` is True.

    Returns
    -------
    (train_loader, valid_loader) : tuple of DataLoader
    """
    assert 0 <= valid_size <= 1, "[!] valid_size should be in the range [0, 1]."

    sample_count = len(train_data)
    order = list(range(sample_count))
    holdout = int(np.floor(valid_size * sample_count))

    if shuffle:
        np.random.seed(seed)
        np.random.shuffle(order)

    # First `holdout` (shuffled) indices go to validation, the rest to training.
    valid_idx = order[:holdout]
    train_idx = order[holdout:]

    train_loader = DataLoader(
        torch.utils.data.Subset(train_data, train_idx),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    valid_loader = DataLoader(
        torch.utils.data.Subset(valid_data, valid_idx),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    print("Training Batches: ", len(train_loader))
    print("Validation Batches: ", len(valid_loader))

    # Optionally preview one batch from each split.
    if show_sample:
        x, y, z = next(iter(train_loader))
        show_images(torch.cat((x, y, z)))
        x, y, z = next(iter(valid_loader))
        show_images(torch.cat((x, y, z)))

    return train_loader, valid_loader
def get_test_loader(test_data, batch_size=1, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False):
    """Wrap a test dataset in a DataLoader and report the batch count.

    Parameters mirror ``torch.utils.data.DataLoader``; unlike the
    train/valid helper above, no index split is performed here.
    """
    loader_opts = dict(
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle,
        pin_memory=pin_memory,
    )
    test_loader = DataLoader(test_data, **loader_opts)
    print("Testing Batches: ", len(test_loader))
    return test_loader
| 33.300518 | 119 | 0.628131 |
9505115c9cbc7843483152234defea7c4da55e5d | 663 | py | Python | 29_Tree/Step03/wowo0709.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 29_Tree/Step03/wowo0709.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 29_Tree/Step03/wowo0709.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline
from collections import deque
# main
V = int(input())
tree = [[] for _ in range(V+1)]
# 1167
for _ in range(V-1):
a,b,c = map(int,input().split())
tree[a].append((c,b))
tree[b].append((c,a))
ds = bfs(1) #
v = ds.index(max(ds)) #
print(max(bfs(v))) # | 24.555556 | 43 | 0.517345 |
9506269afc0618a55f2884b0a52f8b3902a5b1f4 | 997 | py | Python | config.py | anvme/TONTgContractBot | e5fa48d262faec26e2835daa6db764867a369672 | [
"Apache-2.0"
] | null | null | null | config.py | anvme/TONTgContractBot | e5fa48d262faec26e2835daa6db764867a369672 | [
"Apache-2.0"
] | null | null | null | config.py | anvme/TONTgContractBot | e5fa48d262faec26e2835daa6db764867a369672 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ##### TONTgBotContract Config
# Edit starts here
TgBotAPIKey = 'xxxx:yyyy' # API Keythat you get from @BotFather
tg = 11111 # Your id, you can get it by sending command /id to bot @TONTgIDBot
# Edit ends here
tonoscli = '/opt/tonos-cli/target/release/tonos-cli' # Path to tonos-cli
solccompiler = '/opt/ton-solidity-compiler/compiler/build/solc/solc'
tvmlinker = '/opt/ton-tvm-linker/tvm_linker/target/debug/tvm_linker'
compiler = '/opt/tontgbotcontract/data/compiler/' # Path to compiler
tvc = '/opt/tontgbotcontract/data/tvc/' # Path to tvc
sol = '/opt/tontgbotcontract/data/sol/' # Path to sol
keys = '/opt/tontgbotcontract/data/keys/' # Path to keys
tcurl = 'https://net.ton.dev' # tonos-cli net network
gruntabi = "/opt/tontgbotcontract/data/Grunt.abi"
##########
tontgcpath = '/opt/tontgbotcontract' # Folder with this bot.
tontgcpathdb = '/opt/tontgbotcontract/db' # Folder with bot database.
# ##### /TONTgBotContract Config
| 32.16129 | 78 | 0.713139 |
95086bdd5bed5808e0d9ba240d94e656c6d84fab | 1,624 | py | Python | _scripts/pandoc_wiki_filter.py | BenjaminPollak/coursebook | 4646102b5f4c3d283885ba1b221da71a5e509eeb | [
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null | _scripts/pandoc_wiki_filter.py | BenjaminPollak/coursebook | 4646102b5f4c3d283885ba1b221da71a5e509eeb | [
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null | _scripts/pandoc_wiki_filter.py | BenjaminPollak/coursebook | 4646102b5f4c3d283885ba1b221da71a5e509eeb | [
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
"""
Pandoc filter to change each relative URL to absolute
"""
from panflute import run_filter, Str, Header, Image, Math, Link, RawInline
import sys
import re
base_raw_url = 'https://raw.githubusercontent.com/illinois-cs241/coursebook/master/'
if __name__ == "__main__":
main()
| 28.491228 | 84 | 0.640394 |
9508ac69c9c25e71d33441ccd8a681ec504ce33e | 8,793 | py | Python | PA_multiagent_game/multiagent_utils.py | salesforce/RIRL | 6f137955bfbe2054be18bb2b15d0e6aedb972b06 | [
"BSD-3-Clause"
] | null | null | null | PA_multiagent_game/multiagent_utils.py | salesforce/RIRL | 6f137955bfbe2054be18bb2b15d0e6aedb972b06 | [
"BSD-3-Clause"
] | null | null | null | PA_multiagent_game/multiagent_utils.py | salesforce/RIRL | 6f137955bfbe2054be18bb2b15d0e6aedb972b06 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import sys
import glob
sys.path.insert(0, '..')
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.distributions import Categorical
from IPython import display
from agents.soft_q import SoftQAgent
from multi_channel_RI import MCCPERDPAgent
######### General #######################################################
##### Training Function ##########################################################
##### Plotting the History ##########################################################
# ###### Function for naming savefiles #########################################
| 35.313253 | 230 | 0.585579 |
950a7c06be019526c5d13e887a482057df6c98cd | 758 | py | Python | UVa Online Judge/v128/12808.py | mjenrungrot/algorithm | e0e8174eb133ba20931c2c7f5c67732e4cb2b703 | [
"MIT"
] | 1 | 2021-12-08T08:58:43.000Z | 2021-12-08T08:58:43.000Z | UVa Online Judge/v128/12808.py | mjenrungrot/algorithm | e0e8174eb133ba20931c2c7f5c67732e4cb2b703 | [
"MIT"
] | null | null | null | UVa Online Judge/v128/12808.py | mjenrungrot/algorithm | e0e8174eb133ba20931c2c7f5c67732e4cb2b703 | [
"MIT"
] | null | null | null | # =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 12808.py
# Description: UVa Online Judge - 12808
# =============================================================================
import math
if __name__ == "__main__":
T = int(input())
for i in range(T):
run()
| 27.071429 | 79 | 0.387863 |
950ac99a04713eeb0672575cefd8c1ec3997841b | 4,377 | py | Python | cnn_implementer/backends/halide.py | lwaeijen/cnn-mapping-tool | a41c2dccb820f6227ddb6d75af9213e187744826 | [
"MIT"
] | null | null | null | cnn_implementer/backends/halide.py | lwaeijen/cnn-mapping-tool | a41c2dccb820f6227ddb6d75af9213e187744826 | [
"MIT"
] | null | null | null | cnn_implementer/backends/halide.py | lwaeijen/cnn-mapping-tool | a41c2dccb820f6227ddb6d75af9213e187744826 | [
"MIT"
] | null | null | null | import os
import jinja2
import networkx as nx
from ..utils import Logger
from math import ceil, floor
from ..model import Segment
#Add function to Segments that generates unique names for internal nodes
#Function is specific for halide backend, hence it is added here and not in the original definition of Segment
Segment.halide_name=halide_name
| 31.042553 | 162 | 0.558145 |
950b9bd680855e1bc01f2dffb96d063d03df4633 | 137 | py | Python | plasmapy/utils/pytest_helpers/__init__.py | seanjunheng2/PlasmaPy | 7b4e4aaf8b03d88b654456bca881329ade09e377 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 429 | 2016-10-31T19:40:32.000Z | 2022-03-25T12:27:11.000Z | plasmapy/utils/pytest_helpers/__init__.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 1,400 | 2015-11-24T23:00:44.000Z | 2022-03-30T21:03:25.000Z | plasmapy/utils/pytest_helpers/__init__.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 289 | 2015-11-24T18:54:57.000Z | 2022-03-18T17:26:59.000Z | from plasmapy.utils.pytest_helpers.pytest_helpers import (
assert_can_handle_nparray,
run_test,
run_test_equivalent_calls,
)
| 22.833333 | 58 | 0.79562 |
950c169f450a431d53eeadbbe5cd4c9bc80dac22 | 664 | py | Python | code/Attack/ParameterTypes/Types.py | TomasMadeja/ID2T | 77f51c074d1ff83c7d648ae62ecaed3e5cfde80c | [
"MIT"
] | 33 | 2018-11-21T12:50:52.000Z | 2022-01-12T05:38:12.000Z | code/Attack/ParameterTypes/Types.py | TomasMadeja/ID2T | 77f51c074d1ff83c7d648ae62ecaed3e5cfde80c | [
"MIT"
] | 108 | 2018-11-21T12:33:47.000Z | 2022-02-09T15:56:59.000Z | code/Attack/ParameterTypes/Types.py | TomasMadeja/ID2T | 77f51c074d1ff83c7d648ae62ecaed3e5cfde80c | [
"MIT"
] | 20 | 2018-11-22T13:03:20.000Z | 2022-01-12T00:19:28.000Z | import enum
| 28.869565 | 111 | 0.712349 |
950c84ecd7d7ee95d6bf316b3a497327243be4e4 | 1,984 | py | Python | utils/error_handlrer.py | RobinPaspuel/YtechCode | 219a8492aa5be76c445f3d70f8b2ef74e81c188e | [
"MIT"
] | null | null | null | utils/error_handlrer.py | RobinPaspuel/YtechCode | 219a8492aa5be76c445f3d70f8b2ef74e81c188e | [
"MIT"
] | null | null | null | utils/error_handlrer.py | RobinPaspuel/YtechCode | 219a8492aa5be76c445f3d70f8b2ef74e81c188e | [
"MIT"
] | null | null | null |
from utils.error_with_arrows import *
##### ERRORS ########
################################## | 40.489796 | 106 | 0.655242 |
950dcd67a7917370bcc5ec2201e9aaf688e1aa85 | 2,062 | py | Python | postgres/python-asyncio/main.py | Gelbpunkt/idlebench | fe370f9fa6335cf738a91ca818638aedf0cf1ba3 | [
"Apache-2.0"
] | null | null | null | postgres/python-asyncio/main.py | Gelbpunkt/idlebench | fe370f9fa6335cf738a91ca818638aedf0cf1ba3 | [
"Apache-2.0"
] | null | null | null | postgres/python-asyncio/main.py | Gelbpunkt/idlebench | fe370f9fa6335cf738a91ca818638aedf0cf1ba3 | [
"Apache-2.0"
] | 4 | 2020-08-16T22:23:42.000Z | 2020-08-17T20:15:33.000Z | import asyncio
import asyncpg
VALUES = [
356091260429402122,
"Why are you reading",
9164,
6000000,
14,
0,
0,
0,
463318425901596672,
"https://i.imgur.com/LRV2QCK.png",
15306,
["Paragon", "White Sorcerer"],
0,
0,
647,
"Leader",
None,
0,
"10.0",
"10.0",
30,
2,
1,
0,
0,
"1.0",
None,
0,
"Elf",
2,
2,
0,
0,
0,
{"red": 255, "green": 255, "blue": 255, "alpha": 0.8},
]
VALUES_100 = [VALUES for _ in range(100)]
asyncio.run(main())
| 25.45679 | 88 | 0.511639 |
950e90e9549308bcb8380f5876c0fc12c6f68485 | 1,112 | py | Python | fv-courseware/exercise-01/counter_formal.py | DonaldKellett/nmigen-beginner | 260ae76a5277e36ec9909aaf6b76acab320aed88 | [
"MIT"
] | 1 | 2020-11-09T13:34:02.000Z | 2020-11-09T13:34:02.000Z | fv-courseware/exercise-01/counter_formal.py | DonaldKellett/nmigen-beginner | 260ae76a5277e36ec9909aaf6b76acab320aed88 | [
"MIT"
] | null | null | null | fv-courseware/exercise-01/counter_formal.py | DonaldKellett/nmigen-beginner | 260ae76a5277e36ec9909aaf6b76acab320aed88 | [
"MIT"
] | null | null | null | from nmigen import *
from nmigen.asserts import Assert
from nmigen.cli import main_parser, main_runner
__all__ = ["Counter"]
"""
Simple counter with formal verification
See slides 50-60 in
https://zipcpu.com/tutorial/class-verilog.pdf
"""
if __name__ == "__main__":
parser = main_parser()
args = parser.parse_args()
m = Module()
m.submodules.counter = counter = Counter(True)
main_runner(parser, args, m, ports = counter.ports()) | 25.272727 | 55 | 0.695144 |
951023fa012fa8c9f93693ace80f46cf9b0de998 | 10,524 | py | Python | regru_cloudapi/__init__.py | plvskiy/regru_cloudapi | e137a391f67b116f51b77b8e33755f8a6c3b170d | [
"MIT"
] | 1 | 2021-03-07T14:25:59.000Z | 2021-03-07T14:25:59.000Z | regru_cloudapi/__init__.py | plvskiy/regru_cloudapi | e137a391f67b116f51b77b8e33755f8a6c3b170d | [
"MIT"
] | null | null | null | regru_cloudapi/__init__.py | plvskiy/regru_cloudapi | e137a391f67b116f51b77b8e33755f8a6c3b170d | [
"MIT"
] | null | null | null | import json
import requests
from regru_cloudapi.utils import Errors
| 33.515924 | 114 | 0.601292 |
9510db3851814a40d1e201c8697a846d403a09e9 | 731 | py | Python | mnist/download.py | hiroog/cppapimnist | 30d7e01954fc43da2eea5fe3ebf034b37e79cfd1 | [
"MIT"
] | null | null | null | mnist/download.py | hiroog/cppapimnist | 30d7e01954fc43da2eea5fe3ebf034b37e79cfd1 | [
"MIT"
] | null | null | null | mnist/download.py | hiroog/cppapimnist | 30d7e01954fc43da2eea5fe3ebf034b37e79cfd1 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import urllib.request
import os
import gzip
DOWNLOAD_URL='http://yann.lecun.com/exdb/mnist/'
file_list=[ 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte', 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte' ]
for name in file_list:
if not os.path.exists( name ):
gz_name= name + '.gz'
if not os.path.exists( gz_name ):
print( 'download', gz_name )
with urllib.request.urlopen( DOWNLOAD_URL + gz_name ) as fi:
with open( gz_name, 'wb' ) as fo:
fo.write( fi.read() )
print( 'write', name )
with gzip.open( gz_name, 'rb' ) as fi:
with open( name, 'wb' ) as fo:
fo.write( fi.read() )
| 30.458333 | 118 | 0.575923 |
951110f9319a47de447b38bde1aba4ab72ddd1bd | 2,651 | py | Python | arch/arm64/tests/a64_tbnz.py | Samsung/ADBI | 3e424c45386b0a36c57211da819021cb1929775a | [
"Apache-2.0"
] | 312 | 2016-02-04T11:03:17.000Z | 2022-03-18T11:30:10.000Z | arch/arm64/tests/a64_tbnz.py | NickHardwood/ADBI | 3e424c45386b0a36c57211da819021cb1929775a | [
"Apache-2.0"
] | 4 | 2016-02-04T11:05:40.000Z | 2017-07-27T04:22:27.000Z | arch/arm64/tests/a64_tbnz.py | NickHardwood/ADBI | 3e424c45386b0a36c57211da819021cb1929775a | [
"Apache-2.0"
] | 85 | 2016-02-04T12:48:30.000Z | 2021-01-14T06:23:24.000Z | import random
from common import *
| 38.42029 | 94 | 0.488118 |
95128ff73c5b19e12278311e5737397a3c5afe40 | 6,943 | py | Python | infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py | hbeatty/incubator-trafficcontrol | 13ed991531778c60298eb8f532b2a4862f7cb67b | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-04-11T16:55:27.000Z | 2021-04-11T16:55:27.000Z | infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py | hbeatty/incubator-trafficcontrol | 13ed991531778c60298eb8f532b2a4862f7cb67b | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 | 2021-03-12T22:35:02.000Z | 2021-12-09T23:00:11.000Z | infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py | hbeatty/incubator-trafficcontrol | 13ed991531778c60298eb8f532b2a4862f7cb67b | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains miscellaneous utilities, typically dealing with string
manipulation or user input/output
"""
import logging
from sys import stderr
import requests
import typing
def getYesNoResponse(prmpt:str, default:str = None) -> bool:
"""
Utility function to get an interactive yes/no response to the prompt `prmpt`
:param prmpt: The prompt to display to users
:param default: The default response; should be one of ``'y'``, ``"yes"``, ``'n'`` or ``"no"``
(case insensitive)
:raises AttributeError: if 'prmpt' and/or 'default' is/are not strings
:returns: the parsed response as a boolean
"""
if default:
prmpt = prmpt.rstrip().rstrip(':') + '['+default+"]:"
while True:
choice = input(prmpt).lower()
if choice in {'y', 'yes'}:
return True
if choice in {'n', 'no'}:
return False
if not choice and default is not None:
return default.lower() in {'y', 'yes'}
print("Please enter a yes/no response.", file=stderr)
def getTextResponse(uri:str, cookies:dict = None, verify:bool = True) -> str:
"""
Gets the plaintext response body of an HTTP ``GET`` request
:param uri: The full path to a resource for the request
:param cookies: An optional dictionary of cookie names mapped to values
:param verify: If :const:`True`, the SSL keys used to communicate with the full URI will be
verified
:raises ConnectionError: when an error occurs trying to communicate with the server
:raises ValueError: if the server's response cannot be interpreted as a UTF-8 string - e.g.
when the response body is raw binary data but the response headers claim it's UTF-16
"""
logging.info("Getting plaintext response via 'HTTP GET %s'", uri)
response = requests.get(uri, cookies=cookies, verify=verify)
if response.status_code not in range(200, 300):
logging.warning("Status code (%d) seems to indicate failure!", response.status_code)
logging.debug("Response: %r\n%r", response.headers, response.content)
return response.text
def getJSONResponse(uri:str, cookies:dict = None, verify:bool = True) -> dict:
"""
Retrieves a JSON object from some HTTP API
:param uri: The URI to fetch
:param cookies: A dictionary of cookie names mapped to values
:param verify: If this is :const:`True`, the SSL keys will be verified during handshakes with
'https' URIs
:returns: The decoded JSON object
:raises ConnectionError: when an error occurs trying to communicate with the server
:raises ValueError: when the request completes successfully, but the response body
does not represent a JSON-encoded object.
"""
logging.info("Getting JSON response via 'HTTP GET %s", uri)
try:
response = requests.get(uri, cookies=cookies, verify=verify)
except (ValueError, ConnectionError, requests.exceptions.RequestException) as e:
raise ConnectionError from e
if response.status_code not in range(200, 300):
logging.warning("Status code (%d) seems to indicate failure!", response.status_code)
logging.debug("Response: %r\n%r", response.headers, response.content)
return response.json()
def parse_multipart(raw: str) -> typing.List[typing.Tuple[str, str]]:
"""
Parses a multipart/mixed-type payload and returns each contiguous chunk.
:param raw: The raw payload - without any HTTP status line.
:returns: A list where each element is a tuple where the first element is a chunk of the message. All headers are discarded except 'Path', which is the second element of each tuple if it was found in the chunk.
:raises: ValueError if the raw payload cannot be parsed as a multipart/mixed-type message.
>>> testdata = '''MIME-Version: 1.0\\r
... Content-Type: multipart/mixed; boundary="test"\\r
... \\r
... --test\\r
... Content-Type: text/plain; charset=us-ascii\\r
... Path: /path/to/ats/root/directory/etc/trafficserver/fname\\r
... \\r
... # A fake testing file that wasn't generated at all on some date
... CONFIG proxy.config.way.too.many.period.separated.words INT 1
...
... --test\\r
... Content-Type: text/plain; charset=utf8\\r
... Path: /path/to/ats/root/directory/etc/trafficserver/othername\\r
... \\r
... # The same header again
... CONFIG proxy.config.the.same.insane.chain.of.words.again.but.the.last.one.is.different INT 0
...
... --test--\\r
... '''
>>> output = parse_multipart(testdata)
>>> print(output[0][0])
# A fake testing file that wasn't generated at all on some date
CONFIG proxy.config.way.too.many.period.separated.words INT 1
>>> output[0][1]
'/path/to/ats/root/directory/etc/trafficserver/fname'
>>> print(output[1][0])
# The same header again
CONFIG proxy.config.the.same.insane.chain.of.words.again.but.the.last.one.is.different INT 0
>>> output[1][1]
'/path/to/ats/root/directory/etc/trafficserver/othername'
"""
try:
hdr_index = raw.index("\r\n\r\n")
headers = {line.split(':')[0].casefold(): line.split(':')[1] for line in raw[:hdr_index].splitlines()}
except (IndexError, ValueError) as e:
raise ValueError("Invalid or corrupt multipart header") from e
ctype = headers.get("content-type")
if not ctype:
raise ValueError("Message is missing 'Content-Type' header")
try:
param_index = ctype.index(";")
params = {param.split('=')[0].strip(): param.split('=')[1].strip() for param in ctype[param_index+1:].split(';')}
except (IndexError, ValueError) as e:
raise ValueError("Invalid or corrupt 'Content-Type' header") from e
boundary = params.get("boundary", "").strip('"\'')
if not boundary:
raise ValueError("'Content-Type' header missing 'boundary' parameter")
chunks = raw.split(f"--{boundary}")[1:] # ignore prologue
if chunks[-1].strip() != "--":
logging.warning("Final chunk appears invalid - possible bad message payload")
else:
chunks = chunks[:-1]
ret = []
for i, chunk in enumerate(chunks):
try:
hdr_index = chunk.index("\r\n\r\n")
headers = {line.split(':')[0].casefold(): line.split(':')[1] for line in chunk[:hdr_index].splitlines() if line}
except (IndexError, ValueError) as e:
logging.debug("chunk: %s", chunk)
raise ValueError(f"Chunk #{i} poorly formed") from e
ret.append((chunk[hdr_index+4:].replace("\r","").strip(), headers.get("path").strip()))
return ret
| 38.572222 | 211 | 0.715109 |
9512a6419412924d68f8311278ec236177bb738a | 138 | py | Python | api/models/province.py | krosben/api-ctan | 01d5e29694e6f4e35fbe6797c319b109e5bc1c3f | [
"MIT"
] | null | null | null | api/models/province.py | krosben/api-ctan | 01d5e29694e6f4e35fbe6797c319b109e5bc1c3f | [
"MIT"
] | 6 | 2020-06-05T23:40:32.000Z | 2021-06-10T19:03:25.000Z | api/models/province.py | krosben/api-ctan | 01d5e29694e6f4e35fbe6797c319b109e5bc1c3f | [
"MIT"
] | null | null | null | from django.db import models
| 23 | 76 | 0.76087 |
9513d85dbfeb9ed30b03373fa4dafc60c0d1a5b4 | 7,512 | py | Python | audino/backend/routes/labels.py | UCSD-E4E/Pyrenote | bede2cfae9cb543a855d5cb01133b8d7c4abaa1c | [
"MIT"
] | 11 | 2021-07-09T21:39:05.000Z | 2022-03-06T23:11:44.000Z | audino/backend/routes/labels.py | UCSD-E4E/Pyrenote | bede2cfae9cb543a855d5cb01133b8d7c4abaa1c | [
"MIT"
] | 120 | 2021-07-08T04:15:18.000Z | 2022-02-26T00:21:25.000Z | audino/backend/routes/labels.py | UCSD-E4E/Pyrenote | bede2cfae9cb543a855d5cb01133b8d7c4abaa1c | [
"MIT"
] | 1 | 2021-10-16T04:55:42.000Z | 2021-10-16T04:55:42.000Z | import sqlalchemy as sa
from flask import jsonify, request
from flask_jwt_extended import jwt_required, get_jwt_identity
import csv
from sqlalchemy.sql.expression import false
from backend import app, db
from backend.models import Label, LabelValue, Project
from .helper_functions import (
check_admin,
check_admin_permissions,
general_error,
missing_data
)
from . import api
| 29.574803 | 78 | 0.580804 |
9514c9647a31509619c43b943b315ef73a1f481a | 1,192 | py | Python | tests/test_hw02.py | timm/sinless-swe | b331b9bf4d27fdf357ce8a5ce54f9858103fd64f | [
"MIT"
] | null | null | null | tests/test_hw02.py | timm/sinless-swe | b331b9bf4d27fdf357ce8a5ce54f9858103fd64f | [
"MIT"
] | null | null | null | tests/test_hw02.py | timm/sinless-swe | b331b9bf4d27fdf357ce8a5ce54f9858103fd64f | [
"MIT"
] | 2 | 2021-08-29T19:26:19.000Z | 2021-09-20T17:44:27.000Z | import os
import sys
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
from src.hw2 import csv_reader
| 45.846154 | 82 | 0.452181 |
9514f668db331c946ecbf660cfa6375f54adec5b | 2,462 | py | Python | hyperdeck.py | FlantasticDan/hyperdeck-replay | 5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28 | [
"MIT"
] | 1 | 2021-09-06T15:02:34.000Z | 2021-09-06T15:02:34.000Z | hyperdeck.py | FlantasticDan/hyperdeck-replay | 5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28 | [
"MIT"
] | null | null | null | hyperdeck.py | FlantasticDan/hyperdeck-replay | 5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28 | [
"MIT"
] | null | null | null | from telnetlib import Telnet
from threading import Thread
| 34.676056 | 79 | 0.553209 |
95150abc9ac26ff15d14447cfaa884078a1c20b0 | 2,215 | py | Python | tensorwatch/repeated_timer.py | sytelus/longview | 686e43cf187eaf55df18949359fd63d57dc337b2 | [
"MIT"
] | 3,453 | 2019-05-22T15:01:23.000Z | 2022-03-31T07:50:41.000Z | tensorwatch/repeated_timer.py | wgxcow/tensorwatch | 142f83a7cb8c54e47e9bab06cb3a1ef8ae225422 | [
"MIT"
] | 69 | 2019-05-22T17:11:20.000Z | 2022-03-03T09:32:38.000Z | tensorwatch/repeated_timer.py | wgxcow/tensorwatch | 142f83a7cb8c54e47e9bab06cb3a1ef8ae225422 | [
"MIT"
] | 375 | 2019-05-22T17:10:33.000Z | 2022-03-24T07:43:07.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import threading
import time
import weakref | 31.197183 | 99 | 0.602709 |
9515d87797c5883ffb46a5046c9382bbdb71bc8f | 1,037 | py | Python | pushpy_examples/client/tasks/schedule/c_local_schedule.py | briangu/push-examples | 3acf00d9f63523010ee3b70f3117d1be686c3335 | [
"MIT"
] | null | null | null | pushpy_examples/client/tasks/schedule/c_local_schedule.py | briangu/push-examples | 3acf00d9f63523010ee3b70f3117d1be686c3335 | [
"MIT"
] | null | null | null | pushpy_examples/client/tasks/schedule/c_local_schedule.py | briangu/push-examples | 3acf00d9f63523010ee3b70f3117d1be686c3335 | [
"MIT"
] | null | null | null | import time
from pushpy_examples.client.ex_push_manager import ExamplePushManager
m = ExamplePushManager()
m.connect()
repl_code_store = m.repl_code_store()
repl_code_store.set("schedule_task", ScheduleTask, sync=True)
dt = m.local_tasks()
dt.stop("schedule_task")
dt.run("daemon", src="schedule_task", name="schedule_task")
time.sleep(30)
dt.stop("schedule_task")
| 24.690476 | 69 | 0.633558 |
951662a92b08b48e3775881d06dfdde6053f3486 | 453 | py | Python | leetcode/weekly154/balloons.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | 2 | 2018-01-18T11:01:36.000Z | 2021-12-20T18:14:48.000Z | leetcode/weekly154/balloons.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | null | null | null | leetcode/weekly154/balloons.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | null | null | null | '''
https://leetcode.com/contest/weekly-contest-154/problems/maximum-number-of-balloons/
''' | 28.3125 | 84 | 0.479029 |
9516843db83caf5de14579548efc7a35483c1024 | 3,100 | py | Python | app/cache/basic.py | JunyongYao/flask-backend-seed | 9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d | [
"MIT"
] | 9 | 2017-10-20T09:26:09.000Z | 2021-01-28T02:54:43.000Z | app/cache/basic.py | JunyongYao/flask-backend-seed | 9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d | [
"MIT"
] | 2 | 2018-03-06T06:27:53.000Z | 2018-04-19T01:47:38.000Z | app/cache/basic.py | JunyongYao/flask-backend-seed | 9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d | [
"MIT"
] | 2 | 2019-07-18T22:32:28.000Z | 2020-06-15T14:10:29.000Z | # -*- coding: utf-8 -*-
import logging
import pickle
from abc import ABCMeta, abstractmethod
from app import redis
from app.cache import set_dict_if_key_expire, set_data_if_key_expire, set_redis_dict_with_timeout, \
set_redis_data_with_timeout
from task.asyncTask import refresh_cache
class DictCacheABC(CacheABC):
class DataCacheABC(CacheABC):
| 32.978723 | 120 | 0.684516 |
9518a93eb1a74edc2a091b88692ed0896329bfe9 | 38,343 | py | Python | fraudbot.py | DocGrishka/tetstsss | 9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e | [
"MIT"
] | null | null | null | fraudbot.py | DocGrishka/tetstsss | 9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e | [
"MIT"
] | null | null | null | fraudbot.py | DocGrishka/tetstsss | 9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e | [
"MIT"
] | null | null | null | import discord
import sqlite3
import random
import requests
import pymorphy2
from itertools import product
# , - ,
#
class Fraudbot(discord.Client):
def user_status(self, user_id, get_channel=False):
# .
cur = self.con.cursor()
user = cur.execute("Select * from users WHERE user_id=?", (user_id.replace('#', ''),)).fetchone()
if user is None:
return 'None'
if get_channel:
return user[2]
return user[1]
client = Fraudbot()
client.run(open('token.txt', 'r').readline())
| 68.469643 | 122 | 0.463683 |
9518dbb4f02a3d9f4f06a63e879638510aa4fe07 | 31,698 | py | Python | iocage/lib/ioc_json.py | project-fifo/iocage | 1b8669bc2119718dbea8f2707a4eb4c92197c0f0 | [
"BSD-2-Clause"
] | null | null | null | iocage/lib/ioc_json.py | project-fifo/iocage | 1b8669bc2119718dbea8f2707a4eb4c92197c0f0 | [
"BSD-2-Clause"
] | null | null | null | iocage/lib/ioc_json.py | project-fifo/iocage | 1b8669bc2119718dbea8f2707a4eb4c92197c0f0 | [
"BSD-2-Clause"
] | 1 | 2022-03-06T10:09:18.000Z | 2022-03-06T10:09:18.000Z | """Convert, load or write JSON."""
import json
import logging
import os
import re
import sys
from os import geteuid, path
from subprocess import CalledProcessError, PIPE, Popen, STDOUT, check_call
from iocage.lib.ioc_common import checkoutput, get_nested_key, open_atomic
def _get_pool_and_iocroot():
"""For internal setting of pool and iocroot."""
pool = IOCJson().json_get_value("pool")
iocroot = IOCJson(pool).json_get_value("iocroot")
return (pool, iocroot)
| 39.573034 | 81 | 0.430942 |
951a6328f58a32b162e3ef00d555a91633c30955 | 6,913 | py | Python | FP/V46_faraday_effect/plot.py | nsalewski/laboratory | e30d187a3f5227d5e228b0132c3de4d426d85ffb | [
"MIT"
] | 1 | 2021-05-05T23:00:28.000Z | 2021-05-05T23:00:28.000Z | FP/V46_faraday_effect/plot.py | nsalewski/laboratory | e30d187a3f5227d5e228b0132c3de4d426d85ffb | [
"MIT"
] | null | null | null | FP/V46_faraday_effect/plot.py | nsalewski/laboratory | e30d187a3f5227d5e228b0132c3de4d426d85ffb | [
"MIT"
] | null | null | null | #!usr/bin/env python3
#coding:utf8
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
from modules.table import textable
import scipy.constants as const
import math as math
from modules.plot import axislabel as axis
#arr1=[0.4,0.75,1.4]
#arr2=[2,3,4]
#textable.latex_tab(data=[arr1,arr2],names=[r"title column 1",r"title column 2"], filename=r"example.tex",caption=r"Beautiful caption",label=r"important_label",dec_points=[2,0])
#daten importieren
b,z=np.genfromtxt("data/b_feld.txt",unpack=True)
f1,d1_hin,d1_hins,d1_rueck,d1_ruecks=np.genfromtxt("data/1_probe.txt",unpack=True)
f2,d2_hin,d2_hins,d2_rueck,d2_ruecks=np.genfromtxt("data/2_probe.txt",unpack=True)
f3,d3_hin,d3_hins,d3_rueck,d3_ruecks=np.genfromtxt("data/3_probe.txt",unpack=True)
f1=f1*10**(-6)
f2=f2*10**(-6)
f3=f3*10**(-6)
l1=1.296*10**(-3)
l2=1.36*10**(-3)
l3=5.11*10**(-3)
#bogensekunden addieren
grad1_hin=winkel(d1_hin,d1_hins)
grad1_rueck=winkel(d1_rueck,d1_ruecks)
grad2_hin=winkel(d2_hin,d2_hins)
grad2_rueck=winkel(d2_rueck,d2_ruecks)
grad3_hin=winkel(d3_hin,d3_hins)
grad3_rueck=winkel(d3_rueck,d3_ruecks)
#umrechnen auf gleichen Bezugspunkt
grad1_hin=manipulate(grad1_hin)
grad1_rueck=manipulate(grad1_rueck)
grad2_hin=manipulate(grad2_hin)
grad2_rueck=manipulate(grad2_rueck)
grad3_hin=manipulate(grad3_hin)
grad3_rueck=manipulate(grad3_rueck)
grad1=(1/(2*l1)*(grad1_rueck-grad1_hin)*2*np.pi/360)
grad2=(1/(2*l2)*(grad2_rueck-grad2_hin)*2*np.pi/360)
grad3=(1/(2*l3)*(grad3_rueck-grad3_hin)*2*np.pi/360)
#Berechnung delta theta
delta1=grad1-grad3
delta2=grad2-grad3
textable.latex_tab(data=[f1*10**6,grad3,grad1,grad2,delta1,delta2],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_{\mathrm{und}}$/$\si{\radian\per\meter}$",r"$\theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$",r"$\theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$",r"$\Delta \theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$",r"$\Delta \theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$"], filename=r"tables/eff_mass.tex",caption=r"Werte der $\Delta \theta$ zwischen undotiertem und dotiertem $\ce{GaAs}$ zur Bestimmung der effektiven Masse der Kristallelektronen",label=r"eff_mass",dec_points=[2,2,2,2,2,2],tableformat=4.2)
#Tabellen theta
textable.latex_tab(data=[f1*10**6,grad1_hin,grad1_rueck,grad1],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe1.tex",caption=r"Messwerte der Faraday-Rotation fr die dotierte Probe $\ce{GaAs}_{d1}$",label=r"probe1",dec_points=[2,2,2,2],tableformat=4.2)
textable.latex_tab(data=[f2*10**6,grad2_hin,grad2_rueck,grad2],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe2.tex",caption=r"Messwerte der Faraday-Rotation fr die dotierte Probe $\ce{GaAs}_{d2}$",label=r"probe2",dec_points=[2,2,2,2],tableformat=4.2)
textable.latex_tab(data=[f3*10**6,grad3_hin,grad3_rueck,grad3],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe3.tex",caption=r"Messwerte der Faraday-Rotation fr die undotierte Probe $\ce{GaAs}_{und}$",label=r"probe3",dec_points=[2,2,2,2],tableformat=4.2)
#Tabelle Magnetfeld
textable.latex_tab(data=[z-3.1,b],names=[r"$z$/$\si{\centi\meter}$",r"$B$/$\si{\milli\tesla}$"], filename=r"tables/magnetfeld.tex",caption=r"Messung des Magnetfelds in Abhngigkeit zum Ort $z$ (Probe ist etwa bei $\SI{3.1}{\centi\meter}$ platziert)",label=r"magnetfeld",dec_points=[2,0],tableformat=3.2)
z_theo=np.linspace(0,6,50)
#Ausgleichsrechnung Magnetfeld
params, covariance = curve_fit(theorie,z-3.1,b)
errors = np.sqrt(np.diag(covariance))
print(params,errors)
print("Erwartungswert",params[1],errors[1])
delta1_calc=np.delete(delta1,[0,3,7])
f1_calc1=np.delete(f1,[0,3,7])
delta2_calc=np.delete(delta2,[6,7])
f1_calc2=np.delete(f1,[6,7])
#lin regress delta
paramsd1, covarianced1 = curve_fit(lin,(f1_calc1**2),delta1_calc*10**(-6))
errorsd1 = np.sqrt(np.diag(covarianced1))
paramsd2, covarianced2 = curve_fit(lin,(f1_calc2)**2,delta2_calc*10**(-6))
errorsd2 = np.sqrt(np.diag(covarianced2))
a1=ufloat(paramsd1[0],errorsd1[0])*10**(6)
a2=ufloat(paramsd2[0],errorsd2[0])*10**(6)
n=3.3
e0=const.e
eps=const.epsilon_0
c=const.c
B=377.5*10**(-3)
print("Delta_1 Steigung", a1)
print("Delta_2 Steigung", a2)
print("Effektive Masse 1",eff_mass(a1,B,2.8*10**18*10**6),eff_mass(a1,B,2.8*10**18*10**6)/const.m_e)
print("Effektive Masse 2",eff_mass(a2,B,1.2*10**18*10**6),eff_mass(a2,B,1.2*10**18*10**6)/const.m_e)
#Plot Magnetfeld
plt.plot((params[1],params[1]),(-20,400), 'r--', label="Erwartungswert \n der Normalverteilung")
plt.plot(z-3.1,b, 'rx', label="Messwerte $B$")
plt.ylabel(r"$B/\si{\milli\tesla}$")
plt.xlabel(r"z/\si{\centi\meter}")
plt.legend(loc='best')
plt.ylim(-20,400)
axis.labels()
plt.tight_layout()
plt.savefig('pictures/B_feld.pdf')
plt.clf()
#Plot theta
plt.plot(f1*10**6,grad1, 'rx', label=r"Messwerte $\theta_{\mathrm{d1}}$")
plt.plot(f2*10**6,grad2, 'gx', label=r"Messwerte $\theta_{\mathrm{d2}}$")
plt.plot(f3*10**6,grad3, 'bx', label=r"Messwerte $\theta_{\mathrm{und}}$")
plt.ylabel(r"$\theta$/$\si{\radian\per\meter}")
plt.xlabel(r"$\lambda$/$\si{\micro\meter}$")
plt.legend(loc='lower right')
plt.tight_layout()
axis.labels()
plt.xlim(1,3.5)
plt.savefig('pictures/winkel_gg_wellenlaenge.pdf')
plt.clf()
f_theo=np.linspace(0,np.max(f1)+0.1*np.max(f1))
#plot delta
plt.plot((f1)**2*10**11,delta1, 'rx', label=r"$\Delta \theta_{\mathrm{d1}}$")
plt.plot((f_theo)**2*10**11,lin((f_theo)**2,*paramsd1*10**6), 'b-', label="Ausgleichsgrade")
plt.ylabel(r"$\Delta \theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda^{2}$/$\si{\square\meter}\cdot \num{e-11}$")
plt.legend(loc='best')
axis.labels()
plt.xlim(0,1.1)
plt.tight_layout()
plt.savefig('pictures/delta1.pdf')
plt.clf()
plt.plot((f1)**2*10**11,delta2, 'rx', label=r"$\Delta \theta_{\mathrm{d2}}$")
plt.plot((f_theo)**2*10**11,lin(f_theo**2,*paramsd2*10**6), 'b-', label="Ausgleichsgrade")
plt.ylabel(r"$\Delta \theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda^{2}$/$\si{\square\meter}\cdot\num{e-11}$")
axis.labels()
plt.legend(loc='best')
plt.tight_layout()
plt.xlim(0,1.1)
plt.savefig('pictures/delta2.pdf')
plt.clf()
| 43.20625 | 613 | 0.707363 |
951a6b980e66f06393b5c53d18d14db57345b12d | 2,256 | py | Python | hackzurich_py/test_hist_threshold.py | ejoebstl/hackzurich16 | 81a3b302050a4a464e2191c1d0912f8038c26ed9 | [
"MIT"
] | null | null | null | hackzurich_py/test_hist_threshold.py | ejoebstl/hackzurich16 | 81a3b302050a4a464e2191c1d0912f8038c26ed9 | [
"MIT"
] | null | null | null | hackzurich_py/test_hist_threshold.py | ejoebstl/hackzurich16 | 81a3b302050a4a464e2191c1d0912f8038c26ed9 | [
"MIT"
] | null | null | null | import os
# Histogram/threshold exploration for pupil cut-out images: plots per-channel
# histograms, a fixed binary threshold, and binary masks that mark the pixels
# whose channel-0 value is below their channel-2 value.
import matplotlib.pyplot as plt
import numpy as np
import cv2

filedir = '/Users/gabrielfior/Dropbox/Hackzurich16/pupils_cutout/'
readbgr = filedir + 'left_pupil232.bmp'
frame = plt.imread(readbgr)
# Reference frames for comparison.
# NOTE(review): `white`/`black` are read with plt.imread (RGB channel order),
# while `img` below is read with cv2.imread (BGR order), so channel 0 is red
# for the references but blue for `img` -- confirm which ordering is intended.
white = plt.imread('/Users/gabrielfior/Dropbox/Hackzurich16/pupils_bw/right_pupil61.bmp')
black = plt.imread('/Users/gabrielfior/Dropbox/Hackzurich16/pupils_bw/right_pupil203.bmp')

#convert to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# Figure 1: per-channel histogram of the cv2 (BGR) image.
plt.figure(1)
plt.clf()
img = cv2.imread(readbgr)
color = ('b', 'g', 'r')
b = img[:, :, 0]
g = img[:, :, 1]
r = img[:, :, 2]
for i, col in enumerate(color):
    histr = cv2.calcHist([img], [i], None, [256], [0, 256])
    plt.plot(histr, color=col)
    plt.xlim([0, 256])
plt.show()

# Figure 2: fixed binary threshold on channel 0, next to the HSV view.
plt.figure(2)
plt.clf()
plt.subplot(211)
ret, th1 = cv2.threshold(img[:, :, 0], 40, 60, cv2.THRESH_BINARY)
plt.imshow(th1)
plt.subplot(212)
plt.imshow(hsv)

#Compare blue channel (when it is smaller than red channel)
# Vectorized with numpy boolean indexing instead of the original per-pixel
# Python loops; the resulting mask is identical (1 where channel0 < channel2).
new_mask = np.zeros_like(b)
new_mask[img[:, :, 0] < img[:, :, 2]] = 1
plt.figure(3)
plt.clf()
plt.imshow(new_mask)

# Figure 4: histograms of the white and black reference images.
plt.figure(4)
plt.subplot(211)
plt.title('white')
for i, col in enumerate(color):
    histr = cv2.calcHist([white], [i], None, [256], [0, 256])
    plt.plot(histr, color=col)
    plt.xlim([0, 256])
plt.subplot(212)
plt.title('black')
for i, col in enumerate(color):
    histr = cv2.calcHist([black], [i], None, [256], [0, 256])
    plt.plot(histr, color=col)
    plt.xlim([0, 256])
plt.show()

#################
#Compute diff
# Same channel0 < channel2 mask for the two reference images (vectorized,
# identical to the original nested-loop result).
mask_white = np.zeros_like(white[:, :, 0])
mask_white[white[:, :, 0] < white[:, :, 2]] = 1
mask_black = np.zeros_like(black[:, :, 0])
mask_black[black[:, :, 0] < black[:, :, 2]] = 1

#Plot masks
plt.figure(5)
plt.subplot(211)
plt.title('white')
plt.imshow(mask_white)
plt.subplot(212)
plt.title('black')
plt.imshow(mask_black)
plt.show()
#Flat fill
951fd4c03bbcd55fdd4eaa4cf1d74e5f3dba25ea | 496 | py | Python | Lyft-Dental/Django-WebRtc/home/views.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | [
"MIT"
] | 1 | 2021-01-09T08:42:24.000Z | 2021-01-09T08:42:24.000Z | Lyft-Dental/Django-WebRtc/home/views.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | [
"MIT"
] | null | null | null | Lyft-Dental/Django-WebRtc/home/views.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from chat.models import *
# Create your views here.
| 20.666667 | 57 | 0.745968 |
9520fdc9ead572486f8211683471cb168ee795b7 | 6,113 | py | Python | Spatial_Scripts/2_gtfs_arnold_stops.py | VolpeUSDOT/gtfs-measures | 0530d3c7193f10d591edd446d7e4985d03a7c48a | [
"CC0-1.0"
] | 3 | 2019-08-29T13:31:14.000Z | 2021-06-18T06:10:06.000Z | Spatial_Scripts/2_gtfs_arnold_stops.py | VolpeUSDOT/gtfs-measures | 0530d3c7193f10d591edd446d7e4985d03a7c48a | [
"CC0-1.0"
] | null | null | null | Spatial_Scripts/2_gtfs_arnold_stops.py | VolpeUSDOT/gtfs-measures | 0530d3c7193f10d591edd446d7e4985d03a7c48a | [
"CC0-1.0"
] | null | null | null | #-------------------------------------------------------------------------------
# Name: GTFS_Arnold_Stops
#
# Purpose: Associate stops with the route shapes that have already been snapped to ARNOLD
#
# Author: Alex Oberg and Gary Baker
#
# Created: 10/17/2016
#
# Last updated 6/15/2017
#-------------------------------------------------------------------------------
# CONFIG
#-------------------------------------------------------------------------------
#MBTA MODEL
# Input GTFS SQLite database and the output workspace directory.
sqlite_file = r"C:\tasks\2016_09_12_GTFS_ingest\Model\MBTA\GTFS-MBTA.sqlite"
output_dir = r"c:\tasks\2016_09_12_GTFS_ingest\Model\MBTA\Output"
# SETUP
#-------------------------------------------------------------------------------
import datetime
import sqlite3
import arcpy
import os
#out_file = os.path.join(output_dir, 'test.txt')
#wf = open(out_file, 'w')
#wf.write("shape_id, trip_id, stop_lat, stop_lon, milepost\n")
start_time = datetime.datetime.now()
print('\nStart at ' + str(start_time))
# NOTE: Python 2 print statements (and the `<>` operator further below) --
# this script must run under a Python 2 interpreter, as ArcGIS Desktop ships.
print "Started Step 2: Snapping Stops to Routes"
print "GTFS database being processed: " + sqlite_file
# File geodatabase that already holds the route shapes snapped to ARNOLD.
output_gdb = "gtfs_arnold_prelim.gdb"
full_path_to_output_gdb = os.path.join(output_dir, output_gdb)
arcpy.env.workspace = full_path_to_output_gdb
arcpy.env.overwriteOutput = True
# Stops come in as WGS84 lat/lon (4326) and are projected to Albers (102039)
# before measuring their position along the route geometry.
WGS84 = arcpy.SpatialReference(4326)
ALBERS_PRJ = arcpy.SpatialReference(102039)
traversed_oid_dict = {}  # NOTE(review): assigned but never used in this script
con = sqlite3.connect(sqlite_file)
# Prepare the output file
# -----------------------
# CSV of linear-reference events: one row per (route shape, milepost, stop id).
out_lrs_file = os.path.join(output_dir, 'rtshp_lr_stops.txt')
with open(out_lrs_file, 'w') as wf:
    wf.write("ROUTE_SHAPE,MP,STOP_ID\n")
#Add dummy values so ArcGIS doesn't mis-identify the field types
with open(out_lrs_file, 'a') as wf:
    wf.write("randomtext,0.00,randomtext2\nrandomtext,0.00,randomtext3\nrandomtext,0.00,randomtext4\nrandomtext,0.00,randomtext5\n")
# FOR EACH ROUTE SHAPE ID (AKA CONSTRUCTED ROUTE)
# -----------------------------------------
print "Retrieving stops for each route shape ID..."
# All shape ids used by bus trips (GTFS route_type = 3) that have a shape.
sql_shape = '''
select distinct shape_id
from trips t
join routes r on t.route_id = r.route_id
where r.route_type = 3 AND shape_id <> ""
'''
cur_shape_id = con.cursor()
for shape_row in cur_shape_id.execute(sql_shape):
    #Cast as string otherwise non-numeric characters in shape_ID can cause many issues (e.g. some can come across as scientific notation).
    shape_id = str(shape_row[0])
    #print 'processing shape id {}'.format(shape_id)
    #Testing on individual route shapes
    #if not shape_id == '34E0040':
    #continue
    #if not shape_id == '850026':
    #continue
    # GET THE THE CONSTRUCTED ROUTE GEOMETRY FOR THE current ROUTE SHAPE ID
    # --------------------------------------------------------
    # Select the single route_results feature whose name matches this shape.
    arcpy.MakeFeatureLayer_management ("route_results", "route_results_lyr")
    route_results_query = 'name = \'{}\''.format(shape_id)
    arcpy.SelectLayerByAttribute_management ("route_results_lyr", "NEW_SELECTION", route_results_query)
    if int(arcpy.GetCount_management("route_results_lyr").getOutput(0)) != 1:
        # NOTE(review): '' inside a single-quoted Python string is implicit
        # string concatenation, not an escaped quote, so this actually prints
        # "Cant"/"doesnt". Also the loop is NOT skipped here, so the cursor
        # below can still fail when zero features matched.
        print 'Can''t process route shape {} because it doesn''t have a single geography'.format(shape_id)
    route_geometry = None
    with arcpy.da.SearchCursor("route_results_lyr", ["SHAPE@"]) as scursor:
        row = scursor.next()
        route_geometry = row[0]
    # All stops every seen on the current route shape
    # ------------------------------------------------
    #Note that tick marks have to be added to __SHAPE_ID__ to work with shape IDs that contain text.
    sql_stops = '''
    select stop_id, stop_lat, stop_lon
    from stops
    where stop_id in (
    select distinct stop_id
    from stop_times
    where trip_id in (
    select trip_id from trips where shape_id = '__SHAPE_ID__'
    )
    )
    '''
    # NOTE(review): plain string substitution rather than a bound parameter;
    # fine for trusted GTFS input, but a quote inside shape_id would break it.
    sql_stops = sql_stops.replace('__SHAPE_ID__', (shape_id))
    #print sql_stops
    with open(out_lrs_file, 'a') as wf:
        point = arcpy.Point()
        cur_stops = con.cursor()
        for stop_row in cur_stops.execute(sql_stops):
            stop_id, stop_lat, stop_lon = stop_row
            #print '\n{}, {}, {}'.format(stop_id, stop_lat, stop_lon)
            point.X = stop_lon
            point.Y = stop_lat
            # Project the stop and find its nearest point along the route;
            # result[1] is the distance along the route (the milepost).
            point_geom = arcpy.PointGeometry(point, WGS84).projectAs(ALBERS_PRJ)
            result = route_geometry.queryPointAndDistance(point_geom, False)
            #print result
            result_geom = result[0] # TODO make layer from this for use in itegrate step below
            #Adding code to deal with milepost rounding issue
            # Nudge nonzero mileposts down by 0.01 -- presumably so the
            # {:.2f} formatting below cannot round an event past the end of
            # the route (TODO confirm).
            if result[1] <> 0:
                milepost = result[1]-.01
            else:
                milepost = result[1]
            wf.write('{},{:.2f},{}\n'.format(shape_id, milepost, stop_id))
# Linear reference the stops
print "Linear referencing the stops with the route results..."
# Turn the (route shape, milepost) rows into point events along the routes.
arcpy.MakeRouteEventLayer_lr ("route_results", "Name" , out_lrs_file, "ROUTE_SHAPE POINT MP", "stop_events")
# Create a layer from them
arcpy.CopyFeatures_management("stop_events", "stops_lrs_temp")
arcpy.MakeFeatureLayer_management ("stops_lrs_temp", "stops_lrs_temp_lyr")
# Drop the dummy rows that were only written to pin the CSV field types.
arcpy.SelectLayerByAttribute_management(in_layer_or_view="stops_lrs_temp_lyr", selection_type="NEW_SELECTION", where_clause="ROUTE_SHAPE <> 'randomtext'")
arcpy.CopyFeatures_management("stops_lrs_temp_lyr", "stops_lrs")
arcpy.Delete_management("stops_lrs_temp")
# Combine stops together that are within a certain distance of each other
print "Integrating stops that are near each other..."
arcpy.Integrate_management(in_features="stops_lrs #", cluster_tolerance="3 Meters")
# Split network by those integrated points (TODO segregate network that had routes from network that didn't and only split them?)
print "Splitting network at stops..."
arcpy.SplitLineAtPoint_management("network/arnold_split_nw","stops_lrs","network/arnold_split_stops_nw","1 Meters")
end_time = datetime.datetime.now()
total_time = end_time - start_time
print ("\nEnd at {}. Total run time {}".format(end_time, total_time)) | 33.773481 | 154 | 0.648454 |
9521b11ea24c3b1975d9331d56438810a026e0f3 | 14,298 | py | Python | tensorflow_federated/python/research/baselines/emnist/models.py | khramtsova/federated | 88b3ca65204a9922696ccefd774ece03ebf5cc8e | [
"Apache-2.0"
] | 1 | 2019-10-10T06:19:52.000Z | 2019-10-10T06:19:52.000Z | tensorflow_federated/python/research/baselines/emnist/models.py | khramtsova/federated | 88b3ca65204a9922696ccefd774ece03ebf5cc8e | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/research/baselines/emnist/models.py | khramtsova/federated | 88b3ca65204a9922696ccefd774ece03ebf5cc8e | [
"Apache-2.0"
] | 2 | 2019-10-10T06:19:41.000Z | 2021-01-28T03:06:55.000Z | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a model for EMNIST classification."""
import functools
import tensorflow as tf
def create_conv_dropout_model(only_digits=True):
  """Builds the recommended conv/dropout Keras model for EMNIST experiments.

  Architecture: Reshape -> Conv2D(32, 3x3) -> Conv2D(64, 3x3) -> MaxPool(2x2)
  -> Dropout(0.25) -> Flatten -> Dense(128) -> Dropout(0.5) -> softmax head.
  With `only_digits=True` the head has 10 units and the model has 1,199,882
  trainable parameters; with `only_digits=False` the head has 62 units and the
  final dense layer is correspondingly larger.

  Args:
    only_digits: If True, uses a final layer with 10 outputs, for use with the
      digits only EMNIST dataset. If False, uses 62 outputs for the larger
      dataset.

  Returns:
    An uncompiled `tf.keras.Model`.
  """
  data_format = 'channels_last'
  input_shape = [28, 28, 1]
  num_classes = 10 if only_digits else 62
  layers = [
      # Inputs arrive as flat 784-vectors; restore the 28x28x1 image shape.
      tf.keras.layers.Reshape(input_shape=(28 * 28,), target_shape=input_shape),
      tf.keras.layers.Conv2D(
          32,
          kernel_size=(3, 3),
          activation='relu',
          input_shape=input_shape,
          data_format=data_format),
      tf.keras.layers.Conv2D(
          64, kernel_size=(3, 3), activation='relu', data_format=data_format),
      tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
      tf.keras.layers.Dropout(0.25),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(128, activation='relu'),
      tf.keras.layers.Dropout(0.5),
      tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax),
  ]
  return tf.keras.models.Sequential(layers)
def create_original_fedavg_cnn_model(only_digits=True):
  """Builds the CNN used in the original FedAvg paper.

  Reproduces the model from https://arxiv.org/abs/1602.05629: two 5x5 "same"
  convolutions (32 and 64 filters), each followed by 2x2 max pooling, then a
  512-unit dense layer and a softmax head. With `only_digits=True` the model
  has 1,663,370 parameters, matching the number reported in the paper.

  Args:
    only_digits: If True, uses a final layer with 10 outputs, for use with the
      digits only EMNIST dataset. If False, uses 62 outputs for the larger
      dataset.

  Returns:
    An uncompiled `tf.keras.Model`.
  """
  data_format = 'channels_last'
  input_shape = [28, 28, 1]

  def pool():
    # 2x2 same-padded max pooling, halving each spatial dimension.
    return tf.keras.layers.MaxPooling2D(
        pool_size=(2, 2), padding='same', data_format=data_format)

  def conv(filters, **extra):
    # 5x5 same-padded ReLU convolution with the given filter count.
    return tf.keras.layers.Conv2D(
        filters=filters,
        kernel_size=5,
        padding='same',
        data_format=data_format,
        activation=tf.nn.relu,
        **extra)

  return tf.keras.models.Sequential([
      tf.keras.layers.Reshape(input_shape=(28 * 28,), target_shape=input_shape),
      conv(32, input_shape=input_shape),
      pool(),
      conv(64),
      pool(),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(512, activation=tf.nn.relu),
      tf.keras.layers.Dense(
          10 if only_digits else 62, activation=tf.nn.softmax),
  ])
def create_two_hidden_layer_model(only_digits=True, hidden_units=200):
  """Builds a fully connected network with two ReLU hidden layers.

  Args:
    only_digits: A boolean that determines whether to only use the digits in
      EMNIST, or the full EMNIST-62 dataset. If True, uses a final layer with
      10 outputs, for use with the digit-only EMNIST dataset. If False, uses
      62 outputs for the larger dataset.
    hidden_units: An integer specifying the number of units in each of the two
      hidden layers.

  Returns:
    An uncompiled `tf.keras.Model`.
  """
  model = tf.keras.models.Sequential()
  # First hidden layer also declares the flat 784-dimensional input.
  model.add(
      tf.keras.layers.Dense(
          hidden_units, activation=tf.nn.relu, input_shape=(28 * 28,)))
  model.add(tf.keras.layers.Dense(hidden_units, activation=tf.nn.relu))
  model.add(
      tf.keras.layers.Dense(
          10 if only_digits else 62, activation=tf.nn.softmax))
  return model
# L2 regularization strength applied to the kernel and bias of the final
# dense layer built by `create_resnet` below.
L2_WEIGHT_DECAY = 2e-4
def _residual_block(input_tensor, kernel_size, filters, base_name):
  """Applies two conv layers with an identity residual (skip) connection.

  Args:
    input_tensor: The input tensor for the residual block.
    kernel_size: An integer specifying the kernel size of the convolutional
      layers in the residual blocks.
    filters: A list of two integers: the filter counts of the first and the
      second conv layer in the block, respectively.
    base_name: A string used to generate layer names.

  Returns:
    The output tensor of the residual block evaluated at the input tensor.
  """
  filters1, filters2 = filters
  hidden = tf.keras.layers.Conv2D(
      filters1,
      kernel_size,
      padding='same',
      use_bias=False,
      name='{}_conv_1'.format(base_name))(
          input_tensor)
  hidden = tf.keras.layers.Activation('relu')(hidden)
  hidden = tf.keras.layers.Conv2D(
      filters2,
      kernel_size,
      padding='same',
      use_bias=False,
      name='{}_conv_2'.format(base_name))(
          hidden)
  # Identity shortcut: add the block input back before the final ReLU.
  merged = tf.keras.layers.add([hidden, input_tensor])
  return tf.keras.layers.Activation('relu')(merged)
def _conv_residual_block(input_tensor,
                         kernel_size,
                         filters,
                         base_name,
                         strides=(2, 2)):
  """Applies two conv layers with a convolutional residual connection.

  Unlike `_residual_block`, the shortcut here is a strided 1x1 convolution so
  the skip path matches the (possibly downsampled) shape of the main path.

  Args:
    input_tensor: The input tensor for the residual block.
    kernel_size: An integer specifying the kernel size of the convolutional
      layers in the residual blocks.
    filters: A list of two integers: the filter counts of the first and the
      second conv layer in the block, respectively.
    base_name: A string used to generate layer names.
    strides: A tuple of integers specifying the stride lengths of the first
      conv layer (and of the shortcut convolution).

  Returns:
    The output tensor of the residual block evaluated at the input tensor.
  """
  filters1, filters2 = filters
  hidden = tf.keras.layers.Conv2D(
      filters1,
      kernel_size,
      strides=strides,
      padding='same',
      use_bias=False,
      name='{}_conv_1'.format(base_name))(
          input_tensor)
  hidden = tf.keras.layers.Activation('relu')(hidden)
  hidden = tf.keras.layers.Conv2D(
      filters2,
      kernel_size,
      padding='same',
      use_bias=False,
      name='{}_conv_2'.format(base_name))(
          hidden)
  # Project the input with a strided 1x1 conv so the shapes line up.
  projected_input = tf.keras.layers.Conv2D(
      filters2, (1, 1),
      strides=strides,
      use_bias=False,
      name='{}_conv_shortcut'.format(base_name))(
          input_tensor)
  merged = tf.keras.layers.add([hidden, projected_input])
  return tf.keras.layers.Activation('relu')(merged)
def _resnet_block(input_tensor,
                  size,
                  kernel_size,
                  filters,
                  stage,
                  conv_strides=(2, 2)):
  """Applies one conv residual block followed by identity residual blocks.

  The block applies a single `_conv_residual_block` and then `size - 1`
  `_residual_block`s to the given input.

  Args:
    input_tensor: The input tensor for the resnet block.
    size: An integer specifying the total number of residual blocks.
    kernel_size: An integer specifying the kernel size of the convolutional
      layers in the residual blocks.
    filters: A list of two integers specifying the filters of the conv layers
      in each residual block.
    stage: An integer representing the position of the resnet block within the
      resnet. Used for generating layer names.
    conv_strides: A tuple of integers specifying the strides of the first conv
      layer within the conv residual block.

  Returns:
    The output tensor of the resnet block evaluated at the input tensor.
  """
  output = _conv_residual_block(
      input_tensor,
      kernel_size,
      filters,
      base_name='res_{}_block_0'.format(stage),
      strides=conv_strides)
  # Remaining blocks (indices 1 .. size-1) use identity shortcuts.
  for block_index in range(1, size):
    output = _residual_block(
        output,
        kernel_size,
        filters,
        base_name='res_{}_block_{}'.format(stage, block_index))
  return output
def create_resnet(num_blocks=5, only_digits=True):
  """Instantiates a ResNet model for EMNIST classification.

  Follows the architecture of https://arxiv.org/abs/1512.03385: an initial
  3x3 convolution, three stages of residual blocks (16, 32 and 64 filters,
  the latter two downsampling by stride 2), and a regularized softmax dense
  head. Each stage contains `num_blocks` residual blocks of two conv layers
  each, so the network has 6*num_blocks + 2 trainable layers in total (e.g.
  num_blocks = 9 gives ResNet56).

  Args:
    num_blocks: An integer representing the number of residual blocks within
      each ResNet stage.
    only_digits: A boolean that determines whether to only use the digits in
      EMNIST, or the full EMNIST-62 dataset. If True, uses a final layer with
      10 outputs; if False, uses 62 outputs.

  Returns:
    An uncompiled `tf.keras.Model`.
  """
  num_classes = 10 if only_digits else 62
  target_shape = (28, 28, 1)

  img_input = tf.keras.layers.Input(shape=(28 * 28,))
  x = tf.keras.layers.Reshape(
      target_shape=target_shape, input_shape=(28 * 28,))(
          img_input)
  x = tf.keras.layers.ZeroPadding2D(padding=(1, 1), name='initial_pad')(x)
  x = tf.keras.layers.Conv2D(
      16, (3, 3),
      strides=(1, 1),
      padding='valid',
      use_bias=False,
      name='initial_conv')(
          x)
  x = tf.keras.layers.Activation('relu')(x)

  # (stage index, filter count, strides of the stage's first conv block).
  # Stage 2 keeps the spatial size; stages 3 and 4 downsample by 2.
  stage_specs = [(2, 16, (1, 1)), (3, 32, (2, 2)), (4, 64, (2, 2))]
  for stage, num_filters, strides in stage_specs:
    x = _resnet_block(
        x,
        size=num_blocks,
        kernel_size=3,
        filters=[num_filters, num_filters],
        stage=stage,
        conv_strides=strides)

  x = tf.keras.layers.Flatten()(x)
  x = tf.keras.layers.Dense(
      num_classes,
      activation=tf.nn.softmax,
      kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
      kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
      bias_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
      name='fully_connected')(
          x)
  return tf.keras.models.Model(
      img_input, x, name='resnet{}'.format(6 * num_blocks + 2))
| 34.873171 | 80 | 0.665268 |
9522282432e0e76392916180e81134140fe248cd | 893 | py | Python | iterdeciser/loader.py | mpavlase/responses-form-evaluator | d0066a44c078ece458ae44577afc207583116638 | [
"MIT"
] | 1 | 2020-02-19T00:39:10.000Z | 2020-02-19T00:39:10.000Z | iterdeciser/loader.py | mpavlase/responses-form-evaluator | d0066a44c078ece458ae44577afc207583116638 | [
"MIT"
] | null | null | null | iterdeciser/loader.py | mpavlase/responses-form-evaluator | d0066a44c078ece458ae44577afc207583116638 | [
"MIT"
] | null | null | null | import csv
from iterdeciser import models
| 27.060606 | 61 | 0.555431 |
95258effa24ad7ea4b397bc2159a4af1349e68bd | 6,146 | py | Python | adapter.py | jain-harshil/Adapter-BERT | fd74ed0eea21b13034f9a834244191846de6b8d5 | [
"Apache-2.0"
] | 4 | 2021-03-14T23:02:14.000Z | 2022-02-14T10:10:12.000Z | adapter.py | jain-harshil/Adapter-BERT | fd74ed0eea21b13034f9a834244191846de6b8d5 | [
"Apache-2.0"
] | null | null | null | adapter.py | jain-harshil/Adapter-BERT | fd74ed0eea21b13034f9a834244191846de6b8d5 | [
"Apache-2.0"
] | 2 | 2020-10-12T09:04:55.000Z | 2021-11-13T03:54:55.000Z | import torch
from torch import nn
from transformers.modeling_bert import BertIntermediate, BertOutput, BertLayer, BertEncoder, BertModel, BertForSequenceClassification
### Bottleneck Adapter
### BERT
### Parallel Adapter
### XLM-R | 35.94152 | 134 | 0.678653 |
9527282622ce1b8a8057c23be87132dc48225952 | 125 | py | Python | test/integration_test/exampleProject/test_module.py | thusoy/grunt-pylint | 1911144b76b144c991e721c794640c06101a8bf1 | [
"MIT"
] | 9 | 2015-03-04T22:35:49.000Z | 2018-08-16T00:51:24.000Z | test/integration_test/exampleProject/test_module.py | thusoy/grunt-pylint | 1911144b76b144c991e721c794640c06101a8bf1 | [
"MIT"
] | 10 | 2015-03-05T14:09:53.000Z | 2019-04-13T21:48:05.000Z | test/integration_test/exampleProject/test_module.py | thusoy/grunt-pylint | 1911144b76b144c991e721c794640c06101a8bf1 | [
"MIT"
] | 5 | 2015-03-04T16:25:05.000Z | 2018-08-13T10:49:47.000Z | """ This module is used for integration testing. """
# pylint: disable=locally-disabled,unused-import
import venv_exclusive
| 25 | 52 | 0.776 |
95277c92e91076992bcacdf611aab098dd6f15f0 | 3,837 | py | Python | models/pixelpick/networks/deeplab.py | martafdezmAM/lessen_supervision | 630dfea2e396b9b6f61a3ad6786bb3ee169da3fd | [
"MIT"
] | 49 | 2021-04-08T07:45:13.000Z | 2022-03-08T03:20:30.000Z | networks/deeplab.py | leiyu1980/PixelPick | f0ae7d35f62c1dda70f5bff1689177a513ab6259 | [
"MIT"
] | 5 | 2021-04-21T02:13:47.000Z | 2022-03-30T12:06:36.000Z | networks/deeplab.py | leiyu1980/PixelPick | f0ae7d35f62c1dda70f5bff1689177a513ab6259 | [
"MIT"
] | 15 | 2021-04-14T01:15:06.000Z | 2022-03-25T05:05:36.000Z | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .aspp import ASPP
from .decoders import SegmentHead
from .mobilenet_v2 import MobileNetV2
| 37.990099 | 111 | 0.584832 |
95291ef04782317ff7c65177e450a86cba814b66 | 1,224 | py | Python | examples/top_view.py | ryan-mooore/anvil-parser | f2da8e0b7ca84ace49da8c6784363d914b2ca93d | [
"MIT"
] | 70 | 2019-08-12T18:46:09.000Z | 2022-02-22T12:37:29.000Z | examples/top_view.py | ryan-mooore/anvil-parser | f2da8e0b7ca84ace49da8c6784363d914b2ca93d | [
"MIT"
] | 24 | 2020-01-20T04:15:59.000Z | 2022-03-13T20:49:55.000Z | examples/top_view.py | ryan-mooore/anvil-parser | f2da8e0b7ca84ace49da8c6784363d914b2ca93d | [
"MIT"
] | 33 | 2019-12-06T19:22:10.000Z | 2022-03-28T17:08:56.000Z | """
Generates a image of the top view of a chunk
Needs a textures folder with a block folder inside
"""
import sys

# The script needs a region file plus the chunk coordinates inside it.
# Fix: the original only caught the no-argument case (len(sys.argv) == 1),
# so invoking with one or two arguments crashed with an IndexError below.
if len(sys.argv) < 4:
    print('Usage: top_view.py <region file> <chunk x> <chunk z>')
    exit()
else:
    region = sys.argv[1]
    chx = int(sys.argv[2])
    chz = int(sys.argv[3])

import os

from PIL import Image

# NOTE(review): _path looks like a path shim that must run before `anvil`
# can be imported -- keep this import order; confirm against _path.py.
import _path
import anvil

chunk = anvil.Chunk.from_region(region, chx, chz)

# Output image: one 16x16-pixel texture cell per block column of the chunk.
img = Image.new('RGBA', (16 * 16, 16 * 16))

# grid[z][x] holds the id of the topmost non-air block in each column.
grid = [[None for i in range(16)] for j in range(16)]
for y in reversed(range(256)):
    for z in range(16):
        for x in range(16):
            b = chunk.get_block(x, y, z).id
            # Keep only the first (highest) non-air block per column.
            # NOTE(review): only plain 'air' is skipped; other air variants
            # (e.g. 'cave_air'), if present, would be treated as solid.
            if b == 'air' or grid[z][x] is not None:
                continue
            grid[z][x] = b

texturesf = os.listdir('textures/block')
textures = {}  # block id -> PIL image, or None when no texture file exists

for z in range(16):
    for x in range(16):
        b = grid[z][x]
        if b is None:
            continue
        if b not in textures:
            # Cache the texture (or the fact that it is missing) per id.
            if b + '.png' not in texturesf:
                print(f'Skipping {b}')
                textures[b] = None
                continue
            textures[b] = Image.open(f'textures/block/{b}.png')
        if textures[b] is None:
            continue
        img.paste(textures[b], box=(x * 16, z * 16))

img.show()
| 26.042553 | 63 | 0.555556 |
95293f8eba3bae03a2ebdf267114cb3e46a7731e | 2,468 | py | Python | readthedocs/worker.py | yarons/readthedocs.org | 05c99a0adc222a1d48654d305b492ec142c3026b | [
"MIT"
] | 4,054 | 2015-01-01T00:58:07.000Z | 2019-06-28T05:50:49.000Z | readthedocs/worker.py | yarons/readthedocs.org | 05c99a0adc222a1d48654d305b492ec142c3026b | [
"MIT"
] | 4,282 | 2015-01-01T21:38:49.000Z | 2019-06-28T15:41:00.000Z | readthedocs/worker.py | yarons/readthedocs.org | 05c99a0adc222a1d48654d305b492ec142c3026b | [
"MIT"
] | 3,224 | 2015-01-01T07:38:45.000Z | 2019-06-28T09:19:10.000Z | """Celery worker application instantiation."""
import os
from celery import Celery
from django.conf import settings
from django_structlog.celery.steps import DjangoStructLogInitStep
def create_application():
    """Create a Celery application configured from Django settings."""
    # Make sure a Django settings module is set before Celery reads settings.
    os.environ.setdefault(
        'DJANGO_SETTINGS_MODULE',
        'readthedocs.settings.dev',
    )

    celery_app = Celery(settings.CELERY_APP_NAME)
    celery_app.config_from_object('django.conf:settings')
    celery_app.autodiscover_tasks(None)

    # A worker bootstep that initializes django-structlog.
    celery_app.steps['worker'].add(DjangoStructLogInitStep)

    return celery_app
def register_renamed_tasks(application, renamed_tasks):
    """
    Register renamed tasks into the Celery registry under their old names.

    When a task is renamed (function renamed or moved to another module) while
    old instances are still running in production, those old instances keep
    triggering the task under its old name, which the new instances would not
    otherwise have registered. This function aliases each new task under its
    old name, so calls made with the old name execute the new task's code.

    It *must be called after the renamed tasks were already registered/loaded
    by Celery under their new names*. When deploying, think about the order in
    which the ASGs roll out: deploying webs first may require a different
    re-registration than deploying builds first.

    A quick local check: register a task under the old name, trigger it with
    ``apply_async`` on the relevant queue, and confirm the worker runs the new
    code.

    :param application: Celery Application
    :param renamed_tasks: Mapping with the old task name as key and the new
        name as value.
    :type renamed_tasks: dict
    :type application: celery.Celery
    :returns: Celery Application
    """
    registry = application.tasks
    for legacy_name, current_name in renamed_tasks.items():
        # Point the old name at the task object registered under the new name.
        registry[legacy_name] = registry[current_name]
    return application
# Module-level Celery application instance created at import time.
app = create_application()  # pylint: disable=invalid-name
| 32.473684 | 77 | 0.715559 |
952983a05bf28fe82e2cd622f5d71bbde9e46c7c | 876 | py | Python | tr_converter.py | EFatihAydin/contverter_error_utf8 | 971035644425af69d48b869d0de1668127843f01 | [
"MIT"
] | null | null | null | tr_converter.py | EFatihAydin/contverter_error_utf8 | 971035644425af69d48b869d0de1668127843f01 | [
"MIT"
] | null | null | null | tr_converter.py | EFatihAydin/contverter_error_utf8 | 971035644425af69d48b869d0de1668127843f01 | [
"MIT"
] | null | null | null |
# Reads ./data.txt and writes a lower-cased copy of every line to
# ./dosya_out.txt, after running each line through a chain of character
# replacements.
# NOTE(review): every `str.replace` call below has empty-string arguments,
# which makes each call a no-op. The original source almost certainly
# contained non-ASCII (Turkish) characters in these literals that were lost
# to an encoding problem; the intended character mappings must be restored
# before this script does anything beyond lower-casing.
# NOTE(review): `file` shadows a builtin name and is never closed.
file = open("./data.txt" , encoding = 'utf-8')
data = file.readlines()
liste=[]
for string in data:
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace('','')
    string=string.replace("","")
    string=string.replace("","")
    string=string.replace("","")
    string=string.replace("","")
    string=string.replace("","")
    string=string.replace("","")
    string=string.lower()
    liste.append(string)
with open('./dosya_out.txt' , 'w' , encoding = 'utf-8') as fl:
    for i in liste:
        fl.write(str(i))
| 27.375 | 63 | 0.615297 |
952d81863666bd0aa65ead158b3c1300284fe4e6 | 1,485 | py | Python | example/simple_example/example_models.py | kun-fang/avro-data-model | 1a657e20e666b534d0196888ae580ad7caddadeb | [
"MIT"
] | 9 | 2019-03-28T16:31:33.000Z | 2022-02-18T03:22:50.000Z | example/simple_example/example_models.py | kun-fang/avro-data-model | 1a657e20e666b534d0196888ae580ad7caddadeb | [
"MIT"
] | 3 | 2019-06-17T17:09:38.000Z | 2021-05-14T03:06:00.000Z | example/simple_example/example_models.py | kun-fang/avro-data-model | 1a657e20e666b534d0196888ae580ad7caddadeb | [
"MIT"
] | 2 | 2019-04-11T18:26:52.000Z | 2022-02-18T03:22:52.000Z | import datetime
import os
from avro_models import avro_schema, AvroModelContainer
# Container collecting the generated Avro model classes; schemas without an
# explicit namespace fall under "example.avro".
EXAMPLE_NAMES = AvroModelContainer(default_namespace="example.avro")
# Absolute directory of this file -- presumably used to resolve schema file
# paths relative to the example; confirm against callers.
DIRNAME = os.path.dirname(os.path.realpath(__file__))
| 23.203125 | 71 | 0.641077 |
952e3eae671c4397df0072361e08791772e8f4d1 | 5,401 | py | Python | src/lib/Server/Reports/settings.py | pcmxgti/bcfg2 | 33aaf9c6bbeb0d20eef084b1347a0fce42086663 | [
"mpich2"
] | null | null | null | src/lib/Server/Reports/settings.py | pcmxgti/bcfg2 | 33aaf9c6bbeb0d20eef084b1347a0fce42086663 | [
"mpich2"
] | null | null | null | src/lib/Server/Reports/settings.py | pcmxgti/bcfg2 | 33aaf9c6bbeb0d20eef084b1347a0fce42086663 | [
"mpich2"
] | null | null | null | import django
import sys
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
# Django settings for bcfg2 reports project.
#
# Every value below is derived at import time from /etc/bcfg2.conf or
# /etc/bcfg2-web.conf.  Statement order matters: the parser object ``c``
# must be populated before any of the option lookups that follow.
c = ConfigParser.ConfigParser()
# Fail loudly if neither config file is readable; ConfigParser.read()
# returns the list of files it successfully parsed.
if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
    raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists "
                      "and is readable by your web server.")
# web_debug is optional; default to False when the option (or the whole
# section) is missing.  NOTE(review): bare except also hides parse errors.
try:
    DEBUG = c.getboolean('statistics', 'web_debug')
except:
    DEBUG = False
if DEBUG:
    print("Warning: Setting web_debug to True causes extraordinary memory "
          "leaks. Only use this setting if you know what you're doing.")
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Root', 'root'),
)
MANAGERS = ADMINS
# The database engine is mandatory; abort the import when the
# [statistics] section is absent.
try:
    db_engine = c.get('statistics', 'database_engine')
except ConfigParser.NoSectionError:
    # sys.exc_info() keeps this compatible with both Python 2 and 3
    # (no "except ... as e" syntax needed).
    e = sys.exc_info()[1]
    raise ImportError("Failed to determine database engine: %s" % e)
db_name = ''
if c.has_option('statistics', 'database_name'):
    db_name = c.get('statistics', 'database_name')
if db_engine == 'sqlite3' and db_name == '':
    # Default SQLite database lives inside the Bcfg2 repository.
    db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository')
DATABASES = {
    'default': {
        'ENGINE': "django.db.backends.%s" % db_engine,
        'NAME': db_name
    }
}
# Credentials and host/port only apply to server-based engines.
if db_engine != 'sqlite3':
    DATABASES['default']['USER'] = c.get('statistics', 'database_user')
    DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password')
    DATABASES['default']['HOST'] = c.get('statistics', 'database_host')
    try:
        DATABASES['default']['PORT'] = c.get('statistics', 'database_port')
    except: # An empty string tells Django to use the default port.
        DATABASES['default']['PORT'] = ''
# Django < 1.2 expects the old flat DATABASE_* settings instead of the
# DATABASES dict; mirror the values for backwards compatibility.
if django.VERSION[0] == 1 and django.VERSION[1] < 2:
    DATABASE_ENGINE = db_engine
    DATABASE_NAME = DATABASES['default']['NAME']
    if DATABASE_ENGINE != 'sqlite3':
        DATABASE_USER = DATABASES['default']['USER']
        DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
        DATABASE_HOST = DATABASES['default']['HOST']
        DATABASE_PORT = DATABASES['default']['PORT']
# Local time zone for this installation. All choices can be found here:
# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
try:
    TIME_ZONE = c.get('statistics', 'time_zone')
except:
    # On Django > 1.2, TIME_ZONE = None means "use the system time zone".
    if django.VERSION[0] == 1 and django.VERSION[1] > 2:
        TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media'
# Honour an optional deployment prefix (e.g. when served under a subpath).
if c.has_option('statistics', 'web_prefix'):
    MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
# Authentication Settings
# Use NIS authentication backend defined in backends.py
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
                           'Bcfg2.Server.Reports.backends.NISBackend')
# The NIS group authorized to login to BCFG2's reporting system
AUTHORIZED_GROUP = ''
#create login url area:
try:
    import django.contrib.auth
except ImportError:
    raise ImportError('Import of Django module failed. Is Django installed?')
django.contrib.auth.LOGIN_URL = '/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates".
    # Always use forward slashes, even on Windows.
    '/usr/share/python-support/python-django/django/contrib/admin/templates/',
    'Bcfg2.Server.Reports.reports'
)
# The auth context processor moved into django.contrib in Django 1.2;
# pick the import path matching the installed version.
if django.VERSION[0] == 1 and django.VERSION[1] < 2:
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.core.context_processors.auth',
        'django.core.context_processors.debug',
        'django.core.context_processors.i18n',
        'django.core.context_processors.media',
        'django.core.context_processors.request'
    )
else:
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.contrib.auth.context_processors.auth',
        'django.core.context_processors.debug',
        'django.core.context_processors.i18n',
        'django.core.context_processors.media',
        'django.core.context_processors.request'
    )
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'Bcfg2.Server.Reports.reports'
)
| 33.339506 | 79 | 0.695797 |
95300a9bbee2d9246ae4298544114b63521e0cfa | 2,851 | py | Python | arachne/lingo.py | Darumin/arachne | ddae1c9f47e177941a6d6deed84357cbf41ad116 | [
"MIT"
] | 1 | 2020-08-24T05:19:05.000Z | 2020-08-24T05:19:05.000Z | arachne/lingo.py | Darumin/arachne | ddae1c9f47e177941a6d6deed84357cbf41ad116 | [
"MIT"
] | null | null | null | arachne/lingo.py | Darumin/arachne | ddae1c9f47e177941a6d6deed84357cbf41ad116 | [
"MIT"
] | null | null | null | from enum import Enum
import arachne.nouns as a
nouns = (
a.Container,
a.Item,
a.Door,
a.Room,
a.Key,
a.Door
)
# this is an arachne object, in the english grammar sense.
# not to be confused with object types.
# encompasses all known in-game vocabulary, unmatched vocab always default to type Object
lexicon = (
('ARTICLES', '^the$|^a$|^an$|^some$'),
(Compass.NORTH, '^north$|^n$'),
(Compass.EAST, '^east$|^e$'),
(Compass.WEST, '^west$|^w$'),
(Compass.SOUTH, '^south$|^s$'),
(Compass.NORTHEAST, '^northeast$|^ne$'),
(Compass.NORTHWEST, '^northwest$|^nw$'),
(Compass.SOUTHEAST, '^southeast$|^se$'),
(Compass.SOUTHWEST, '^southwest$|^sw$'),
(Compass.UP, '^up$|^u$'),
(Compass.DOWN, '^down$|^d$'),
(Verb.LOOK, '^look$'),
(Verb.TAKE, '^take$|^get$'),
(Verb.DROP, '^drop$'),
(Verb.PUT, '^put$|^store$|^place$'),
(Verb.EXAMINE, '^x$|^check$|^examine$'),
(Verb.INVENTORY, '^i$|^inv$|^inventory$'),
(Verb.USE, '^use$|^consume$|^spend$'),
(Verb.OPEN, '^open$'),
(Verb.CLOSE, '^close$'),
(Verb.UNLOCK, '^unlock$'),
(Verb.LOCK, '^lock$'),
(Prep.WITHIN, '^in$|^inside$|^into$'),
(Prep.ATOP, '^on$|^above$'),
(Prep.SETTING, '^at$|^to$')
)
| 24.577586 | 89 | 0.591371 |
9531452916d8af98d79a18cfcf7c243ec86f577d | 488 | py | Python | src/hera/host_alias.py | bchalk101/hera-workflows | a3e9262f996ba477a35850c7e4b18ce3d5749687 | [
"MIT"
] | 84 | 2021-10-20T17:20:22.000Z | 2022-03-31T17:20:06.000Z | src/hera/host_alias.py | bchalk101/hera-workflows | a3e9262f996ba477a35850c7e4b18ce3d5749687 | [
"MIT"
] | 84 | 2021-10-31T16:05:51.000Z | 2022-03-31T14:25:25.000Z | src/hera/host_alias.py | bchalk101/hera-workflows | a3e9262f996ba477a35850c7e4b18ce3d5749687 | [
"MIT"
] | 18 | 2021-11-01T04:34:39.000Z | 2022-03-29T03:48:19.000Z | from typing import List
from argo_workflows.models import HostAlias as ArgoHostAlias
from pydantic import BaseModel
| 23.238095 | 103 | 0.715164 |
9532e0a3625fbfa97cee2a3c1c1ac08b02e54bbb | 1,297 | py | Python | legacy/lua_data/lua_data_converter.py | kshshkim/factorioCalcPy | 2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205 | [
"MIT"
] | 1 | 2021-09-21T01:42:05.000Z | 2021-09-21T01:42:05.000Z | legacy/lua_data/lua_data_converter.py | kshshkim/factorioCalcPy | 2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205 | [
"MIT"
] | null | null | null | legacy/lua_data/lua_data_converter.py | kshshkim/factorioCalcPy | 2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205 | [
"MIT"
] | null | null | null | from slpp import slpp as lua
import json
'''
lc=LuaConverter()
lc.write('fluid.lua','fluid_dict.py')
'''
| 36.027778 | 112 | 0.591365 |
9533f3d3d51a5a32d60d0e2337d926980cff5177 | 839 | py | Python | odette/scripts/collect_iso_codes.py | mdelhoneux/oDETTE | 1b09bb3a950eb847c409de48c466d6559a010bd8 | [
"Unlicense"
] | 2 | 2017-04-18T13:31:37.000Z | 2017-07-12T21:00:10.000Z | odette/scripts/collect_iso_codes.py | mdelhoneux/oDETTE | 1b09bb3a950eb847c409de48c466d6559a010bd8 | [
"Unlicense"
] | null | null | null | odette/scripts/collect_iso_codes.py | mdelhoneux/oDETTE | 1b09bb3a950eb847c409de48c466d6559a010bd8 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
#==============================================================================
#author :Miryam de Lhoneux
#email :miryam.de_lhoneux@lingfil.uu.se
#date :2015/12/30
#version :1.0
#description :collect iso codes in UD directories
#usage :python scripts/collect_iso_codes.py
#Python version :2.7.6
#==============================================================================
import os
import sys
import pprint
#generate a dictionary of iso_codes from ud treebank directory
# Build a {language-directory: iso-code} mapping from a UD treebank tree.
# The iso code is the part of a *.conllu file name before the first "-";
# only files whose second dot-separated component is "conllu" are used.
codes = {}
ud_dir = sys.argv[1]
for language in os.listdir(ud_dir):
    ldir = ud_dir + "/" + language
    for fname in os.listdir(ldir):
        parts = fname.split(".")
        if len(parts) > 1 and parts[1] == "conllu":
            codes[language] = fname.split("-")[0]
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(codes)
| 28.931034 | 79 | 0.54112 |
20f86d70eb09a90cb1a4b918de25a5f97e226d8c | 5,696 | py | Python | airtest/core/ios/mjpeg_cap.py | Cache-Cloud/Airtest | 4f831977a32c2b120dee631631c1154407b34d32 | [
"Apache-2.0"
] | null | null | null | airtest/core/ios/mjpeg_cap.py | Cache-Cloud/Airtest | 4f831977a32c2b120dee631631c1154407b34d32 | [
"Apache-2.0"
] | null | null | null | airtest/core/ios/mjpeg_cap.py | Cache-Cloud/Airtest | 4f831977a32c2b120dee631631c1154407b34d32 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import socket
import traceback
from airtest import aircv
from airtest.utils.snippet import reg_cleanup, on_method_ready, ready_method
from airtest.core.ios.constant import ROTATION_MODE, DEFAULT_MJPEG_PORT
from airtest.utils.logger import get_logger
from airtest.utils.safesocket import SafeSocket
LOGGING = get_logger(__name__)
if __name__ == "__main__":
import wda
from airtest.core.ios.instruct_cmd import InstructHelper
addr = "http://localhost:8100"
driver = wda.Client(addr)
info = driver.info
instruct_helper = InstructHelper(info['uuid'])
mjpeg_server = MJpegcap(instruct_helper)
print(len(mjpeg_server.get_frame())) | 33.309942 | 119 | 0.607619 |
20fa7eb3a7346661e1dcc5a7aa474c9102b7df4b | 3,342 | py | Python | happy.py | xiaoqcn/LearnLinuxViaPython | 3c591471bbceefab44161aedb8ff67c2009b8ec0 | [
"Apache-2.0"
] | null | null | null | happy.py | xiaoqcn/LearnLinuxViaPython | 3c591471bbceefab44161aedb8ff67c2009b8ec0 | [
"Apache-2.0"
] | null | null | null | happy.py | xiaoqcn/LearnLinuxViaPython | 3c591471bbceefab44161aedb8ff67c2009b8ec0 | [
"Apache-2.0"
] | null | null | null | import time
import datetime
import os
import sys
import atexit
import signal
from multiprocessing import Pool
from threading import Thread
| 28.084034 | 92 | 0.529623 |
20fa9357a93d7d86c13beaf0a8a806393d553ed4 | 526 | py | Python | functional_tests/test_gallery.py | atypicalrobot/igor_personal_site | 8fd788bc43884792b786abeb34e9fec9e79492f1 | [
"MIT"
] | null | null | null | functional_tests/test_gallery.py | atypicalrobot/igor_personal_site | 8fd788bc43884792b786abeb34e9fec9e79492f1 | [
"MIT"
] | null | null | null | functional_tests/test_gallery.py | atypicalrobot/igor_personal_site | 8fd788bc43884792b786abeb34e9fec9e79492f1 | [
"MIT"
] | null | null | null | from .base import * | 29.222222 | 71 | 0.659696 |
20fb6d839493dfeb4698c4e202a1cd7ca0226dba | 784 | py | Python | plates.py | winksaville/cq-plates | fb175522fae991a8d88cdf26afad273a4b8b9098 | [
"MIT"
] | null | null | null | plates.py | winksaville/cq-plates | fb175522fae991a8d88cdf26afad273a4b8b9098 | [
"MIT"
] | null | null | null | plates.py | winksaville/cq-plates | fb175522fae991a8d88cdf26afad273a4b8b9098 | [
"MIT"
] | null | null | null | import cadquery as cq # type: ignore
# Three calibration plates at half, one, and two nozzle diameters thick,
# laid out side by side along X and merged into a single compound so that
# they can be selected and exported to STL together.
nd = 0.4  # Nozzle Diameter
length = 50
width = 20
gap = 5


def _plate(x_offset, thickness):
    """Return one rectangular plate extruded from the XY plane."""
    return (
        cq.Workplane("XY", origin=(x_offset, 0, 0))
        .rect(width, length)
        .extrude(thickness)
    )


p1 = _plate(-(width + gap), nd / 2)
p2 = _plate(0, nd)
p3 = _plate(width + gap, nd * 2)

# Combine the objects so they all can be selected and exported to stl.
# Note: .val() is required; passing the Workplane objects themselves
# raises "AttributeError: 'Workplane' object has no 'wapped'".
all = cq.Compound.makeCompound([plate.val() for plate in (p1, p2, p3)])
show_object(all)
| 21.189189 | 68 | 0.626276 |
20fe1adaa92216baa26b834b33664cd9c78ae67b | 2,430 | py | Python | tests/tonalmodel_tests/test_chromatic_scale.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | 1 | 2021-05-06T19:45:54.000Z | 2021-05-06T19:45:54.000Z | tests/tonalmodel_tests/test_chromatic_scale.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | null | null | null | tests/tonalmodel_tests/test_chromatic_scale.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | null | null | null | import unittest
import logging
from tonalmodel.chromatic_scale import ChromaticScale
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 38.571429 | 117 | 0.60535 |
20feae08b04eeba7945d6473eedc0730006c75f9 | 3,093 | py | Python | beeseyes/pycode/sampling.py | sosi-org/scientific-code | 395bae0f95fbccb936dc01145c797dc22a1c99a0 | [
"Unlicense"
] | null | null | null | beeseyes/pycode/sampling.py | sosi-org/scientific-code | 395bae0f95fbccb936dc01145c797dc22a1c99a0 | [
"Unlicense"
] | null | null | null | beeseyes/pycode/sampling.py | sosi-org/scientific-code | 395bae0f95fbccb936dc01145c797dc22a1c99a0 | [
"Unlicense"
] | null | null | null | import numpy as np
import math
import polygon_sampler
nan_rgb = np.zeros((3,)) + np.NaN
# sampler session: texture, W_,H_,W,H
'''
Used by `sample_colors_squarepixels()`
Samples a single point.
Using square pixels.
[0, ... ,W-1] (incl.)
By mapping [0,1) -> [0,W) (int)
(mapping u,v)
'''
'''
Simple sampler.
Slow.
"Pixel at Centroid" sampler:
one pixel is taken for each region.
Uses `sample1`.
If regions is None, a different order is used.
'''
def sample_colors_squarepixels_pointwise(uv, texture):
    '''
    Based on `sample_colors_squarepixels` but without regions:
    a simple point-wise sampling, one RGB colour per (u, v) pair.

    uv: shape => (n_points, 2)
    Returns (regions_rgb, uvm_for_debug): the sampled colours, shape
    (n_points, 3), and a copy of the coordinates actually sampled.
    '''
    # Drop the alpha channel of RGBA textures; only RGB is sampled.
    if texture.shape[2] == 4:
        texture = texture[:, :, 0:3]
    EPS = 0.00000001
    (H, W) = texture.shape[0:2]
    # Shrink the upper bound slightly so u == 1.0 still maps inside [0, W).
    W_ = W - EPS
    H_ = H - EPS
    print('uv.shape', uv.shape)
    n_points = uv.shape[0]
    uvm_for_debug = np.zeros((n_points, 2), dtype=float)
    regions_rgb = np.zeros((n_points, 3), dtype=float)
    for row, (um, vm) in enumerate(uv):
        uvm_for_debug[row, :] = [um, vm]
        regions_rgb[row] = sample1(um, vm, texture, W_, H_, W, H)
    # Sanity check: the coordinates recorded are exactly the input.
    assert np.allclose(uvm_for_debug, uv, equal_nan=True)
    return regions_rgb, uvm_for_debug
'''
Choice of sampler method.
Choose your hexagon sampler here.
regions=None => point-wise: simply sample the uv points.
regions=not None => forms regions from these points and samples those regions from the texture. (For now, it is the median point of each region/facet.)
'''
| 25.991597 | 154 | 0.6172 |
20fedbf1080a9f144951aee297b7d6f393e3751d | 5,237 | py | Python | src/ui/workspace_view.py | weijiang1994/iPost | 008e767c23691bd9ba802eab1e405f98094cce4c | [
"MIT"
] | 2 | 2021-10-18T01:24:04.000Z | 2021-12-14T01:29:22.000Z | src/ui/workspace_view.py | weijiang1994/iPost | 008e767c23691bd9ba802eab1e405f98094cce4c | [
"MIT"
] | null | null | null | src/ui/workspace_view.py | weijiang1994/iPost | 008e767c23691bd9ba802eab1e405f98094cce4c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'workspace_view.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 50.355769 | 114 | 0.715104 |
20ff397b31725a7c336cc66646521d603dc8bb92 | 389 | py | Python | task_queuing/tasks/custom.py | joejcollins/lieutenant-dean | eea536a146fb89b2feca244d5c4cf68e662cf2f2 | [
"MIT"
] | null | null | null | task_queuing/tasks/custom.py | joejcollins/lieutenant-dean | eea536a146fb89b2feca244d5c4cf68e662cf2f2 | [
"MIT"
] | null | null | null | task_queuing/tasks/custom.py | joejcollins/lieutenant-dean | eea536a146fb89b2feca244d5c4cf68e662cf2f2 | [
"MIT"
] | null | null | null | """Custom celery task to capitalize text"""
import task_queuing.celery_app as app
# app.queues.tasks.register(Capitalize)
| 19.45 | 43 | 0.694087 |
1f00bbb4cb26e6889fa5994c748463440e235c8e | 654 | py | Python | migrations/versions/d805931e1abd_add_topics.py | cyberinnovationhub/lunch-roulette | 0b0b933188c095b6e3778ee7de9d4e21cd7caae5 | [
"BSD-3-Clause"
] | 4 | 2020-12-03T19:24:20.000Z | 2022-03-16T13:45:11.000Z | migrations/versions/d805931e1abd_add_topics.py | cyberinnovationhub/lunch-roulette | 0b0b933188c095b6e3778ee7de9d4e21cd7caae5 | [
"BSD-3-Clause"
] | 3 | 2020-08-24T08:05:11.000Z | 2021-11-07T06:14:36.000Z | migrations/versions/d805931e1abd_add_topics.py | cyberinnovationhub/lunch-roulette | 0b0b933188c095b6e3778ee7de9d4e21cd7caae5 | [
"BSD-3-Clause"
] | 3 | 2020-08-27T13:58:53.000Z | 2022-03-09T14:09:06.000Z | """add topics
Revision ID: d805931e1abd
Revises: 9430b6bc8d1a
Create Date: 2018-09-18 15:11:45.922659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd805931e1abd'
down_revision = '9430b6bc8d1a'
branch_labels = None
depends_on = None
| 22.551724 | 84 | 0.689602 |
1f01e1d172c08c2fafb69829e4c50d4807643989 | 726 | py | Python | 1-50/031NextPermutation.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | 1-50/031NextPermutation.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | 1-50/031NextPermutation.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-24
"""
if __name__ == '__main__':
print Solution().nextPermutation([1, 3, 2]) | 24.2 | 74 | 0.508264 |
1f04128726942205094994e2b681a53cdfe743aa | 64 | py | Python | h1st/tuner/__init__.py | vophihungvn/h1st | d421995bb0b8de6a5a76788261efef5b26bc7c12 | [
"Apache-2.0"
] | null | null | null | h1st/tuner/__init__.py | vophihungvn/h1st | d421995bb0b8de6a5a76788261efef5b26bc7c12 | [
"Apache-2.0"
] | null | null | null | h1st/tuner/__init__.py | vophihungvn/h1st | d421995bb0b8de6a5a76788261efef5b26bc7c12 | [
"Apache-2.0"
] | null | null | null | from h1st.tuner.hyperparameter_tuner import HyperParameterTuner
| 32 | 63 | 0.90625 |
1f0432871a66053bea5e2a19da56fe363bea9cb9 | 78,296 | py | Python | allesfitter/basement.py | pierfra-ro/allesfitter | a6a885aaeb3253fec0d924ef3b45e8b7c473b181 | [
"MIT"
] | null | null | null | allesfitter/basement.py | pierfra-ro/allesfitter | a6a885aaeb3253fec0d924ef3b45e8b7c473b181 | [
"MIT"
] | null | null | null | allesfitter/basement.py | pierfra-ro/allesfitter | a6a885aaeb3253fec0d924ef3b45e8b7c473b181 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 00:17:06 2018
@author:
Dr. Maximilian N. Gnther
European Space Agency (ESA)
European Space Research and Technology Centre (ESTEC)
Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands
Email: maximilian.guenther@esa.int
GitHub: mnguenther
Twitter: m_n_guenther
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: modules
import numpy as np
import os
import sys
import fnmatch
import collections
from datetime import datetime
from multiprocessing import cpu_count
import warnings
warnings.formatwarning = lambda msg, *args, **kwargs: f'\n! WARNING:\n {msg}\ntype: {args[0]}, file: {args[1]}, line: {args[2]}\n'
warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
warnings.filterwarnings('ignore', category=np.RankWarning)
from scipy.stats import truncnorm
#::: allesfitter modules
from .exoworlds_rdx.lightcurves.index_transits import index_transits, index_eclipses, get_first_epoch, get_tmid_observed_transits
from .priors.simulate_PDF import simulate_PDF
from .utils.mcmc_move_translator import translate_str_to_move
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
###############################################################################
#::: 'Basement' class, which contains all the data, settings, etc.
###############################################################################
| 58.04003 | 224 | 0.481008 |
1f08e87bb685c5de27a28a6c0f75d6ba70a73d31 | 3,334 | py | Python | schematron/ssk.py | SarahTV/SSK | ac7f5b7b1f1c02aefcb706abd80178f86c216cf7 | [
"CC-BY-4.0"
] | null | null | null | schematron/ssk.py | SarahTV/SSK | ac7f5b7b1f1c02aefcb706abd80178f86c216cf7 | [
"CC-BY-4.0"
] | null | null | null | schematron/ssk.py | SarahTV/SSK | ac7f5b7b1f1c02aefcb706abd80178f86c216cf7 | [
"CC-BY-4.0"
] | null | null | null | #coding: utf-8
import re
import os
from lxml import etree as ET
from bs4 import BeautifulSoup
import csv
| 35.468085 | 96 | 0.54889 |
1f098e212077f84f0f80919da194e6c3605bd4fb | 14,798 | py | Python | src/01_eigenprogression_transform.py | lostanlen/nemisig2018 | 2868da84c938ff6db98936d81a830b838eef1131 | [
"MIT"
] | 1 | 2018-09-27T09:07:05.000Z | 2018-09-27T09:07:05.000Z | src/01_eigenprogression_transform.py | lostanlen/nemisig2018 | 2868da84c938ff6db98936d81a830b838eef1131 | [
"MIT"
] | null | null | null | src/01_eigenprogression_transform.py | lostanlen/nemisig2018 | 2868da84c938ff6db98936d81a830b838eef1131 | [
"MIT"
] | null | null | null | import localmodule
import datetime
import h5py
import math
import music21 as m21
import numpy as np
import os
import scipy
import scipy.linalg
import sys
import time
# Parse arguments
# Usage: 01_eigenprogression_transform.py <composer> <track>
# NOTE(review): no argv length check -- missing arguments raise IndexError.
args = sys.argv[1:]
composer_str = args[0]
track_str = args[1]
# Define constants.
J_tm = 8                 # depth of the temporal wavelet filter bank
N = 2**10                # transform length (time frames per piece)
n_octaves = 8            # pitch range covered, in octaves
midi_octave_offset = 2   # octaves subtracted from MIDI note numbers
quantization = 2.0       # time steps per quarter note
xi = 0.25                # mother wavelet center frequency
sigma = 0.1              # mother wavelet bandwidth
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Eigenprogression transform.")
print("Composer: " + composer_str + ".")
print("Piece: " + track_str + ".")
print("")
print("h5py version: {:s}".format(h5py.__version__))
print("music21 version: {:s}".format(m21.__version__))
print("numpy version: {:s}".format(np.__version__))
print("scipy version: {:s}".format(scipy.__version__))
print("")
############################# (1) PARSING ##################################
# Start clock.
parsing_start_time = int(time.time())
# Parse Kern score with music21.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
kern_name = "_".join([dataset_name, "kern"])
kern_dir = os.path.join(data_dir, kern_name)
composer_dir = os.path.join(kern_dir, composer_str)
track_name = track_str + ".krn"
track_path = os.path.join(composer_dir, track_name)
score = m21.converter.parse(track_path)
pianoroll_parts = []
n_parts = len(score.parts)
n_semitones = 12 * n_octaves
# Loop over parts to extract piano rolls.
# Each part becomes a binary (n_semitones, N) matrix: rows are semitones,
# columns are quantized time frames, 1 marks a sounding note.
for part_id in range(n_parts):
    part = score.parts[part_id]
    pianoroll_part = np.zeros((n_semitones, N), dtype=np.float32)
    # Get the measure offsets
    measure_offset = {}
    for el in part.recurse(classFilter=('Measure')):
        measure_offset[el.measureNumber] = el.offset
    # Loop over notes
    for note in part.recurse(classFilter=('Note')):
        # Absolute onset/offset in quarter notes, scaled by the time
        # quantization and rounded up to integer frame indices.
        note_start = int(math.ceil(
            (measure_offset[note.measureNumber] +\
                note.offset) *\
            quantization))
        note_end = int(math.ceil((
            measure_offset[note.measureNumber] +\
            note.offset +\
            note.duration.quarterLength) *\
            quantization))
        # NOTE(review): frames past index N are silently cropped by the
        # slice below -- confirm N is large enough for every piece.
        pianoroll_part[
            note.midi - midi_octave_offset * 12,
            note_start:note_end] = 1
    pianoroll_parts.append(pianoroll_part)
# Stack parts into piano roll.
# Binary union across parts: a pitch is "on" if any part plays it.
mtrack_pianoroll = np.stack(pianoroll_parts, 2)
pianoroll = mtrack_pianoroll.max(axis=2)
# Print elapsed time.
elapsed_time = time.time() - int(parsing_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Parsing took " + elapsed_str + " seconds.")
####################### (2) WAVELET TRANSFORM ##############################
# Start clock.
wavelet_start_time = int(time.time())
# Setup wavelet filter bank over time.
# One Morlet filter per dyadic scale j (center frequency xi * 2**-j,
# bandwidth sigma * 2**-j), stored along the last axis from coarse to fine.
wavelet_filterbank_ft = np.zeros((1, N, J_tm), dtype=np.float32)
for j in range(J_tm-1):
    xi_j = xi * 2**(-j)
    sigma_j = sigma * 2**(-j)
    center = xi_j * N
    den = 2 * sigma_j * sigma_j * N * N
    psi_ft = localmodule.morlet(center, den, N, n_periods=4)
    wavelet_filterbank_ft[0, :, -1 - j] = psi_ft
# Append scaling function phi (average).
wavelet_filterbank_ft[0, 0, 0] = 1
# Convolve pianoroll with filterbank.
# Multiplication in the Fourier domain == circular convolution in time.
pianoroll_ft = scipy.fftpack.fft(pianoroll, axis=1)
pianoroll_ft = np.expand_dims(pianoroll_ft, axis=2)
wavelet_transform_ft = pianoroll_ft * wavelet_filterbank_ft
wavelet_transform = scipy.fftpack.ifft(wavelet_transform_ft, axis=1)
# Print elapsed time.
# Bug fix: this previously subtracted parsing_start_time, so the reported
# duration wrongly included the whole parsing stage.
elapsed_time = time.time() - int(wavelet_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Wavelet transform took " + elapsed_str + " seconds.")
####################### (3) EIGENTRIAD TRANSFORM ###########################
# Start clock.
eigentriad_start_time = int(time.time())
# Reshape MIDI axis to chromagram
# Split the semitone axis into (chroma, octave); Fortran order ('F') keeps
# pitch class as the fastest-varying index.
chromagram = np.reshape(wavelet_transform,
    (12, -1, wavelet_transform.shape[1], wavelet_transform.shape[2]), 'F')
# Construct eigentriads
# fourier_basis is the 3-point DFT matrix: one row per chord tone t,
# one column per discrete frequency omega.
cosine_basis = np.array([[np.cos(2*np.pi*omega*t/3)
    for omega in range(3)] for t in range(3)]).T
sine_basis = np.array([[np.sin(2*np.pi*omega*t/3)
    for omega in range(3)] for t in range(3)]).T
fourier_basis = cosine_basis + 1.0j * sine_basis
# Chord templates as semitone offsets from the root.
major_template = [0, 4, 7]
minor_template = [0, 3, 7]
# Place each DFT coefficient on the chroma bins of the chord tones.
major_eigentriads = np.zeros((12, 3), dtype=np.complex64)
minor_eigentriads = np.zeros((12, 3), dtype=np.complex64)
for omega in range(3):
    for t, p in enumerate(major_template):
        major_eigentriads[p, omega] = fourier_basis[t, omega]
    for t, p in enumerate(minor_template):
        minor_eigentriads[p, omega] = fourier_basis[t, omega]
# Axis 1 is chord quality: 0 = major, 1 = minor.
eigentriads = np.stack(
    (major_eigentriads, minor_eigentriads), axis=1)
eigentriads = eigentriads.astype(np.complex64)
# Convolve chromagram with eigentriads
# (pointwise multiplication of spectra along the chroma axis).
chromagram_ft = scipy.fftpack.fft(chromagram, axis=0)
chromagram_ft = chromagram_ft[:, np.newaxis, :, :, :, np.newaxis]
eigentriads_ft = scipy.fftpack.fft(eigentriads, axis=0)
eigentriads_ft = eigentriads_ft[:, :, np.newaxis,
    np.newaxis, np.newaxis, :]
eigentriad_transform_ft = chromagram_ft * eigentriads_ft
# NOTE(review): this applies a *forward* fft to the product, whereas the
# other stages apply ifft after multiplying spectra -- confirm intended.
eigentriad_transform = scipy.fftpack.fft(
    eigentriad_transform_ft, axis=0)
# Apply modulus nonlinearity
eigentriad_transform_modulus = np.abs(eigentriad_transform)
# Print elapsed time.
elapsed_time = time.time() - int(eigentriad_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigentriad transform took " + elapsed_str + " seconds.")
####################### (4) SCATTERING TRANSFORM ###########################
# Start clock.
scattering_start_time = int(time.time())
# Setup scattering filter bank over time.
# Interleaves each Morlet with a mirrored (negative-frequency) copy so
# both spins are represented per scale.
scattering_filterbank_ft = np.zeros((1, N, 2*J_tm-1), dtype=np.float32)
for j in range(J_tm-1):
    xi_j = xi * 2**(-j)
    sigma_j = sigma * 2**(-j)
    center = xi_j * N
    den = 2 * sigma_j * sigma_j * N * N
    psi_ft = localmodule.morlet(center, den, N, n_periods=4)
    # Mirrored spectrum: roll by -1 then reverse keeps bin 0 aligned.
    conj_psi_ft = np.roll(psi_ft, -1)[::-1]
    scattering_filterbank_ft[0, :, -1 - 2*j] = psi_ft
    scattering_filterbank_ft[0, :, -1 - (2*j+1)] = conj_psi_ft
scattering_filterbank_ft[0, 0, 0] = 1
# Convolve eigentriad transform with filterbank again.
# This is akin to a scattering transform.
# We remove the finest scale (last two coefficients).
# NOTE(review): scattering_filterbank_ft is rebound below to a view of
# wavelet_filterbank_ft, so the bank built in the loop above is never
# used -- confirm which filter bank is intended.
eigentriad_transform_modulus_ft =\
    scipy.fftpack.fft(eigentriad_transform_modulus, axis=3)
eigentriad_transform_modulus_ft =\
    eigentriad_transform_modulus_ft[:, :, :, :, :, :, np.newaxis]
scattering_filterbank_ft =\
    wavelet_filterbank_ft[:, np.newaxis, np.newaxis, :,
        np.newaxis, np.newaxis, :-2]
scattering_transform_ft =\
    eigentriad_transform_modulus_ft * scattering_filterbank_ft
scattering_transform = scipy.fftpack.ifft(scattering_transform_ft, axis=3)
# Print elapsed time.
elapsed_time = time.time() - int(scattering_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Scattering transform took " + elapsed_str + " seconds.")
###################### (5) EIGENPROGRESSION TRANSFORM ######################
# Start clock.
eigenprogression_start_time = int(time.time())
# Reshape chroma and quality into a chord axis
# (12 chromas x 2 qualities -> 24 Tonnetz vertices, Fortran order).
sc_shape = scattering_transform.shape
tonnetz_shape = (
    sc_shape[0]*sc_shape[1], sc_shape[2],
    sc_shape[3], sc_shape[4], sc_shape[5],
    sc_shape[6])
tonnetz = np.reshape(scattering_transform,
    tonnetz_shape, 'F')
# NOTE(review): tonnetz is not referenced again in this stage.
# Build adjacency matrix for Tonnetz graph
# (1/3) Major to minor transitions.
major_edges = np.zeros((12,), dtype=np.float32)
# Parallel minor (C major to C minor)
major_edges[0] = 1
# Relative minor (C major to A minor)
major_edges[9] = 1
# Leading tone minor (C major to E minor)
major_edges[4] = 1
# (2/3) Minor to major transitions
minor_edges = np.zeros((12,))
# Parallel major (C minor to C major)
minor_edges[0] = 1
# Relative major (C minor to Eb major)
minor_edges[3] = 1
# Leading tone exchange (C minor to Ab major)
minor_edges[8] = 1
# (3/3) Build full adjacency matrix by 4 blocks.
major_adjacency = scipy.linalg.toeplitz(major_edges, minor_edges)
minor_adjacency = scipy.linalg.toeplitz(minor_edges, major_edges)
tonnetz_adjacency = np.zeros((24, 24), dtype=np.float32)
tonnetz_adjacency[:12, 12:] = minor_adjacency
tonnetz_adjacency[12:, :12] = major_adjacency
# Define Laplacian on the Tonnetz graph.
# Every vertex has degree 3 (one P, one R, and one L neighbor).
tonnetz_laplacian = 3 * np.eye(24, dtype=np.float32) - tonnetz_adjacency
# Diagonalize the Laplacian and sort eigenvectors by ascending eigenvalue.
# (A redundant first call to np.linalg.eig, whose swapped outputs were
# immediately overwritten, was removed here.)
eigvals, eigvecs = np.linalg.eig(tonnetz_laplacian)
sorting_indices = np.argsort(eigvals)
eigvals = eigvals[sorting_indices]
eigvecs = eigvecs[:, sorting_indices]
# Name the eigenvectors ("eigenprogressions") after the harmonic motions
# they encode; complex pairs combine two real eigenvectors in quadrature.
# Key invariance
phi = eigvecs[:, 0]
# Tonic invariance with quality covariance
psi_quality = eigvecs[:, 23]
# C -> C# -> D ... simultaneously with Cm -> C#m -> ...
# Major third periodicity.
psi_chromatic = eigvecs[:, 1] + 1j * eigvecs[:, 2]
# Major keys: pentatonic pattern (C D F G A) moving up a minor third.
# Major keys: minor seventh pattern (B D E A) moving down a minor third.
psi_pentatonic_up = eigvecs[:, 3] + 1j * eigvecs[:, 4]
# Cm -> B -> Bm -> Bb -> Am -> ...
# Minor third periodicity
psi_Cm_B_Bm_Bb = eigvecs[:, 5] + 1j * eigvecs[:, 6]
# C -> Am -> A -> Cm -> C ...
# Relative (R) followed by parallel (P).
# Major third periodicity
# (np.complex was removed from NumPy 1.24; the builtin complex is
# identical in behavior.)
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_RP = eigvecs[:, 7] + j * eigvecs[:, 8] + jbar * eigvecs[:, 9]
# C -> Bm -> Bb -> Am -> Ab -> ...
psi_C_Bm_Bb_Am = eigvecs[:, 10] + 1j * eigvecs[:, 11]
# Upwards minor third. Qualities in phase opposition.
psi_minorthird_quality = eigvecs[:, 12] + 1j * eigvecs[:, 13]
# Ab is simultaneous with Am.
# Abstract notion of "third" degree with quality invariance?
# Tritone periodicity
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_third_tritone = eigvecs[:, 14] + j * eigvecs[:, 15] + jbar * eigvecs[:, 16]
# C -> C#m -> D -> D#m -> ...
# Minor third periodicity.
psi_C_Dbm_D_Ebm = eigvecs[:, 17] + 1j * eigvecs[:, 18]
# Major keys: pentatonic pattern (C D F G A) moving down a minor third.
# Major keys: minor seventh pattern (B D E A) moving up a minor third.
psi_pentatonic_down = eigvecs[:, 19] + 1j * eigvecs[:, 20]
# C is simultaneous with Dm.
# Abstract notion of minor key?
# Major third periodicity.
psi_minorkey = eigvecs[:, 21] + 1j * eigvecs[:, 22]
# Concatenate eigenprogressions.
# NOTE(review): psi_C_Bm_Bb_Am appears twice below (13 entries total) --
# confirm the duplication is intentional and not a stand-in for another
# eigenvector.
eigenprogressions = np.stack((
    phi,
    psi_quality,
    psi_chromatic,
    psi_pentatonic_up,
    psi_Cm_B_Bm_Bb,
    psi_RP,
    psi_C_Bm_Bb_Am,
    psi_C_Bm_Bb_Am,
    psi_minorthird_quality,
    psi_third_tritone,
    psi_C_Dbm_D_Ebm,
    psi_pentatonic_down,
    psi_minorkey), axis=-1)
eigenprogressions = np.reshape(eigenprogressions, (12, 2, -1), 'F')
eigenprogressions = eigenprogressions.astype(np.complex64)
# Apply eigenprogression transform.
# Fourier-domain product along the chroma axis, then inverse transform.
scattering_transform_ft = scipy.fftpack.fft(scattering_transform, axis=0)
scattering_transform_ft = scattering_transform_ft[:, :, :, :, :, :, :, np.newaxis]
eigenprogressions_ft = scipy.fftpack.fft(eigenprogressions, axis=0)
eigenprogressions_ft = eigenprogressions_ft[
    :, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis, np.newaxis]
eigenprogression_transform_ft = scattering_transform_ft * eigenprogressions_ft
eigenprogression_transform = scipy.fftpack.ifft(eigenprogression_transform_ft, axis=0)
# Print elapsed time.
elapsed_time = time.time() - int(eigenprogression_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigenprogression transform took " + elapsed_str + " seconds.")
###################### (5) SPIRAL TRANSFORM ######################
# Start clock.  Keep full float precision; int() truncation skewed the
# reported duration by up to one second.
spiral_start_time = time.time()
# Setup wavelet filter bank across octaves.
# This is comparable to a spiral scattering transform.
J_oct = 3
octave_filterbank_ft = np.zeros((n_octaves, 2*J_oct-1), dtype=np.float32)
for j in range(J_oct-1):
    xi_j = xi * 2**(-j)
    sigma_j = sigma * 2**(-j)
    center = xi_j * n_octaves
    den = 2 * sigma_j * sigma_j * n_octaves * n_octaves
    psi_ft = localmodule.morlet(center, den, n_octaves, n_periods=4)
    # Mirrored spectrum: wavelet for the negative frequencies.
    conj_psi_ft = np.roll(psi_ft, -1)[::-1]
    octave_filterbank_ft[:, -1 - 2*j] = psi_ft
    octave_filterbank_ft[:, -1 - (2*j+1)] = conj_psi_ft
# Lowpass (DC-only) filter in the first column.
octave_filterbank_ft[0, 0] = 1
octave_filterbank_ft = octave_filterbank_ft[
    np.newaxis, np.newaxis, :,
    np.newaxis, np.newaxis,
    np.newaxis, np.newaxis, np.newaxis]
# Apply octave transform: convolve along the octave axis by pointwise
# multiplication in the Fourier domain.
eigenprogression_transform_ft = scipy.fftpack.fft(
    eigenprogression_transform, axis=2)
eigenprogression_transform_ft = eigenprogression_transform_ft[
    :, :, :, :, :, :, :, :, np.newaxis]
spiral_transform_ft =\
    eigenprogression_transform_ft * octave_filterbank_ft
# Bug fix: return to the octave domain with the *inverse* FFT.  The
# original applied a forward fft a second time, which only rescales and
# reverses the octave axis instead of inverting the transform (compare
# the fft / multiply / ifft pattern used for the chroma-axis convolution
# in step (4)).
spiral_transform = scipy.fftpack.ifft(
    spiral_transform_ft, axis=2)
# Print elapsed time.
elapsed_time = time.time() - spiral_start_time
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Spiral transform took " + elapsed_str + " seconds.")
######################## (6) MODULUS AND AVERAGING #########################
modulus_start_time = time.time()
# Apply second-order modulus nonlinearity.
U2 = np.abs(spiral_transform)
# Pool (sum, not mean) over chroma, quality, octave, and time axes.
S2 = np.sum(U2, axis=(0, 1, 2, 3))
# Print elapsed time.  Keep the float start time: the original wrapped it
# in int(), truncating up to one second from the reported duration.
elapsed_time = time.time() - modulus_start_time
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Averaging took " + elapsed_str + " seconds.")
############################### (7) STORAGE #################################
# Store the averaged second-order coefficients to an HDF5 container,
# one file per (composer, track).
hdf5_name = "_".join([dataset_name, "eigenprogression-transforms"])
hdf5_dir = os.path.join(data_dir, hdf5_name)
os.makedirs(hdf5_dir, exist_ok=True)
composer_dir = os.path.join(hdf5_dir, composer_str)
os.makedirs(composer_dir, exist_ok=True)
out_path = os.path.join(composer_dir,
    "_".join([
        dataset_name,
        "eigenprogression-transform",
        composer_str,
        track_str + ".hdf5"]))
hdf5_dataset_size = S2.shape
hdf5_dataset_key = "_".join([
    "eigenprogression-transform",
    composer_str,
    track_str])
# Open in write mode explicitly: h5py.File without a mode argument is
# deprecated and its default changed in h5py 3.x.  The context manager
# guarantees the file is closed even if the write raises.
with h5py.File(out_path, "w") as out_file:
    hdf5_dataset = out_file.create_dataset(hdf5_dataset_key, hdf5_dataset_size)
    hdf5_dataset[:] = S2
# Report total wall-clock time as HH:MM:SS.ss.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
# Decompose seconds into hours / minutes / fractional seconds with divmod.
total_minutes, elapsed_seconds = divmod(elapsed_time, 60.)
elapsed_hours, elapsed_minutes = divmod(int(total_minutes), 60)
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(
    elapsed_hours, elapsed_minutes, elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
| 34.334107 | 86 | 0.68462 |